static inline void
adjust_tcp_sequence(u32 seq,
		    int sizediff,
		    struct nf_conn *ct,
		    enum ip_conntrack_info ctinfo)
{
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_seq *this_way = &nat->seq[dir];

	pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n",
		 seq, sizediff);

	pr_debug("adjust_tcp_sequence: Seq_offset before: ");
	DUMP_OFFSET(this_way);

	spin_lock_bh(&nf_nat_seqofs_lock);

	/* SYN adjust. If it's uninitialized, or this is after last
	 * correction, record it: we don't handle more than one
	 * adjustment in the window, but do deal with common case of a
	 * retransmit */
	if (this_way->offset_before == this_way->offset_after ||
	    before(this_way->correction_pos, seq)) {
		this_way->correction_pos = seq;
		this_way->offset_before = this_way->offset_after;
		this_way->offset_after += sizediff;
	}
	spin_unlock_bh(&nf_nat_seqofs_lock);

	pr_debug("adjust_tcp_sequence: Seq_offset after: ");
	DUMP_OFFSET(this_way);
}
/* Setup TCP sequence correction given this change at this sequence */
static inline void
adjust_tcp_sequence(u32 seq,
		    int sizediff,
		    struct nf_conn *ct,
		    enum ip_conntrack_info ctinfo)
{
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_seq *this_way = &nat->seq[dir];

	pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n",
		 seq, sizediff);

	pr_debug("adjust_tcp_sequence: Seq_offset before: ");
	DUMP_OFFSET(this_way);

	spin_lock_bh(&nf_nat_seqofs_lock);

	/* SYN adjust. If it's uninitialized, or this is after last
	 * correction, record it: we don't handle more than one
	 * adjustment in the window, but do deal with common case of a
	 * retransmit */
	if (this_way->offset_before == this_way->offset_after ||
	    before(this_way->correction_pos, seq)) {
		this_way->correction_pos = seq;
		this_way->offset_before = this_way->offset_after;
		this_way->offset_after += sizediff;
	}
	spin_unlock_bh(&nf_nat_seqofs_lock);

	pr_debug("adjust_tcp_sequence: Seq_offset after: ");
	DUMP_OFFSET(this_way);
}
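
The record written by adjust_tcp_sequence() is consumed when later packets in the same direction are rewritten: sequence numbers after correction_pos get offset_after, everything at or before it still gets offset_before (compare nf_nat_seq_adjust() and nf_nat_get_offset() further down). A minimal sketch of that lookup, with the seqofs lock omitted for brevity:

/* Sketch only: mirrors the offset selection in nf_nat_get_offset();
 * 'this_way' is the nf_nat_seq slot for the packet's direction.
 */
static s16 seq_offset_for(const struct nf_nat_seq *this_way, u32 seq)
{
	return after(seq, this_way->correction_pos) ?
	       this_way->offset_after : this_way->offset_before;
}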
static inline void
adjust_tcp_sequence(u32 seq,
		    int sizediff,
		    struct nf_conn *ct,
		    enum ip_conntrack_info ctinfo)
{
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_seq *this_way = &nat->seq[dir];

#ifdef CONFIG_HTC_NETWORK_MODIFY
	if (IS_ERR(this_way) || (!this_way))
		printk(KERN_ERR "[NET] this_way is NULL in %s!\n", __func__);
#endif

	pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n",
		 seq, sizediff);

	pr_debug("adjust_tcp_sequence: Seq_offset before: ");
	DUMP_OFFSET(this_way);

	spin_lock_bh(&nf_nat_seqofs_lock);

	if (this_way->offset_before == this_way->offset_after ||
	    before(this_way->correction_pos, seq)) {
		this_way->correction_pos = seq;
		this_way->offset_before = this_way->offset_after;
		this_way->offset_after += sizediff;
	}
	spin_unlock_bh(&nf_nat_seqofs_lock);

	pr_debug("adjust_tcp_sequence: Seq_offset after: ");
	DUMP_OFFSET(this_way);
}
static void
pptp_exp_gre(struct nf_conntrack_expect *expect_orig,
	     struct nf_conntrack_expect *expect_reply)
{
	const struct nf_conn *ct = expect_orig->master;
	struct nf_ct_pptp_master *ct_pptp_info;
	struct nf_nat_pptp *nat_pptp_info;

	ct_pptp_info  = &nfct_help(ct)->help.ct_pptp_info;
	nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;

	/* save original PAC call ID in nat_info */
	nat_pptp_info->pac_call_id = ct_pptp_info->pac_call_id;

	/* alter expectation for PNS->PAC direction */
	expect_orig->saved_proto.gre.key = ct_pptp_info->pns_call_id;
	expect_orig->tuple.src.u.gre.key = nat_pptp_info->pns_call_id;
	expect_orig->tuple.dst.u.gre.key = ct_pptp_info->pac_call_id;
	expect_orig->dir = IP_CT_DIR_ORIGINAL;

	/* alter expectation for PAC->PNS direction */
	expect_reply->saved_proto.gre.key = nat_pptp_info->pns_call_id;
	expect_reply->tuple.src.u.gre.key = nat_pptp_info->pac_call_id;
	expect_reply->tuple.dst.u.gre.key = ct_pptp_info->pns_call_id;
	expect_reply->dir = IP_CT_DIR_REPLY;
}
Example #5
static void
pptp_exp_gre(struct nf_conntrack_expect *expect_orig,
             struct nf_conntrack_expect *expect_reply)
{
    const struct nf_conn *ct = expect_orig->master;
    struct nf_ct_pptp_master *ct_pptp_info;
    struct nf_nat_pptp *nat_pptp_info;

    ct_pptp_info  = &nfct_help(ct)->help.ct_pptp_info;
    nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;

#ifdef CONFIG_HTC_NETWORK_MODIFY
    if (IS_ERR(ct_pptp_info) || (!ct_pptp_info))
        printk(KERN_ERR "[NET] ct_pptp_info is NULL in %s!\n", __func__);

    if (IS_ERR(nat_pptp_info) || (!nat_pptp_info))
        printk(KERN_ERR "[NET] nat_pptp_info is NULL in %s!\n", __func__);
#endif

    /* save original PAC call ID in nat_info */
    nat_pptp_info->pac_call_id = ct_pptp_info->pac_call_id;

    /* alter expectation for PNS->PAC direction */
    expect_orig->saved_proto.gre.key = ct_pptp_info->pns_call_id;
    expect_orig->tuple.src.u.gre.key = nat_pptp_info->pns_call_id;
    expect_orig->tuple.dst.u.gre.key = ct_pptp_info->pac_call_id;
    expect_orig->dir = IP_CT_DIR_ORIGINAL;

    /* alter expectation for PAC->PNS direction */
    expect_reply->saved_proto.gre.key = nat_pptp_info->pns_call_id;
    expect_reply->tuple.src.u.gre.key = nat_pptp_info->pac_call_id;
    expect_reply->tuple.dst.u.gre.key = ct_pptp_info->pns_call_id;
    expect_reply->dir = IP_CT_DIR_REPLY;
}
static void
pptp_exp_gre(struct nf_conntrack_expect *expect_orig,
	     struct nf_conntrack_expect *expect_reply)
{
	const struct nf_conn *ct = expect_orig->master;
	struct nf_ct_pptp_master *ct_pptp_info;
	struct nf_nat_pptp *nat_pptp_info;

	ct_pptp_info  = &nfct_help(ct)->help.ct_pptp_info;
	nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;

	/* save original PAC call ID in nat_info */
	nat_pptp_info->pac_call_id = ct_pptp_info->pac_call_id;

	/* alter expectation for PNS->PAC direction */
	expect_orig->saved_proto.gre.key = ct_pptp_info->pns_call_id;
	expect_orig->tuple.src.u.gre.key = nat_pptp_info->pns_call_id;
	expect_orig->tuple.dst.u.gre.key = ct_pptp_info->pac_call_id;
	expect_orig->dir = IP_CT_DIR_ORIGINAL;

	/* alter expectation for PAC->PNS direction */
	expect_reply->saved_proto.gre.key = nat_pptp_info->pns_call_id;
	expect_reply->tuple.src.u.gre.key = nat_pptp_info->pac_call_id;
	expect_reply->tuple.dst.u.gre.key = ct_pptp_info->pns_call_id;
	expect_reply->dir = IP_CT_DIR_REPLY;
}
static unsigned int
masquerade_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct nf_nat_range *range = par->targinfo;
	enum ip_conntrack_info ctinfo;
	struct in6_addr src;
	struct nf_conn *ct;
	struct nf_nat_range newrange;

	ct = nf_ct_get(skb, &ctinfo);
	NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
			    ctinfo == IP_CT_RELATED_REPLY));

	if (ipv6_dev_get_saddr(dev_net(par->out), par->out,
			       &ipv6_hdr(skb)->daddr, 0, &src) < 0)
		return NF_DROP;

	nfct_nat(ct)->masq_index = par->out->ifindex;

	newrange.flags		= range->flags | NF_NAT_RANGE_MAP_IPS;
	newrange.min_addr.in6	= src;
	newrange.max_addr.in6	= src;
	newrange.min_proto	= range->min_proto;
	newrange.max_proto	= range->max_proto;

	return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
}
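
For context, masquerade_tg6() is the target function of the IPv6 MASQUERADE xtables target. Below is a hedged sketch of how such a target is typically registered; the field values are assumptions inferred from the targinfo usage above (the real module also supplies a checkentry callback, omitted here so the sketch stays self-contained).

/* Sketch: plausible xt_target registration for masquerade_tg6().
 * Field values are assumptions based on the function above; the
 * module's checkentry callback is omitted.
 */
static struct xt_target masquerade_tg6_reg __read_mostly = {
	.name		= "MASQUERADE",
	.family		= NFPROTO_IPV6,
	.target		= masquerade_tg6,
	.targetsize	= sizeof(struct nf_nat_range),
	.table		= "nat",
	.hooks		= 1 << NF_INET_POST_ROUTING,
	.me		= THIS_MODULE,
};

static int __init masquerade_tg6_init(void)
{
	return xt_register_target(&masquerade_tg6_reg);
}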
Example #8
static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (nf_nat_proto_remove(ct, data))
		return 1;

	if (!nat)
		return 0;

	/* This netns is being destroyed, and conntrack has nat null binding.
	 * Remove it from bysource hash, as the table will be freed soon.
	 *
	 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
	 * will delete entry from already-freed table.
	 */
	ct->status &= ~IPS_NAT_DONE_MASK;
	rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource,
			       nf_nat_bysource_params);

	/* don't delete conntrack.  Although that would make things a lot
	 * simpler, we'd end up flushing all conntracks on nat rmmod.
	 */
	return 0;
}
static unsigned int nf_nat_fn(const struct nf_hook_ops *ops,
			      struct sk_buff *skb,
			      const struct net_device *in,
			      const struct net_device *out,
			      int (*okfn)(struct sk_buff *))
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
	struct nf_conn_nat *nat;
	enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
	unsigned int ret;

	if (ct == NULL || nf_ct_is_untracked(ct))
		return NF_ACCEPT;

	NF_CT_ASSERT(!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)));

	nat = nfct_nat(ct);
	if (nat == NULL) {
		/* Conntrack module was loaded late, can't add extension. */
		if (nf_ct_is_confirmed(ct))
			return NF_ACCEPT;
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
		if (nat == NULL)
			return NF_ACCEPT;
	}

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED + IP_CT_IS_REPLY:
		if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   ops->hooknum))
				return NF_DROP;
			else
				return NF_ACCEPT;
		}
		/* Fall through */
	case IP_CT_NEW:
		if (nf_nat_initialized(ct, maniptype))
			break;

		ret = nft_do_chain(ops, skb, in, out, okfn);
		if (ret != NF_ACCEPT)
			return ret;
		if (!nf_nat_initialized(ct, maniptype)) {
			ret = nf_nat_alloc_null_binding(ct, ops->hooknum);
			if (ret != NF_ACCEPT)
				return ret;
		}
	default:
		break;
	}

	return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
}
Example #10
static int device_cmp(struct nf_conn *i, void *ifindex)
{
	const struct nf_conn_nat *nat = nfct_nat(i);

	if (!nat)
		return 0;
	if (nf_ct_l3num(i) != NFPROTO_IPV4)
		return 0;
	return nat->masq_index == (int)(long)ifindex;
}
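
device_cmp() is written as a conntrack iterator callback: the masquerading code walks the conntrack table with it when an interface goes down, so entries whose masq_index matches the dead interface can be killed. A rough sketch of that call, assuming the nf_ct_iterate_cleanup() variant that takes portid/report arguments (the argument list differs between kernel releases):

/* Sketch: flush conntracks that were masqueraded via a downed device.
 * Assumes the five-argument nf_ct_iterate_cleanup(); older kernels
 * use a three-argument form. Entries for which device_cmp() returns
 * 1 are killed by the iteration helper.
 */
static void masq_flush_by_ifindex(struct net *net, int ifindex)
{
	nf_ct_iterate_cleanup(net, device_cmp, (void *)(long)ifindex, 0, 0);
}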
Example #11
struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nfct_nat(ct);
	if (nat)
		return nat;

	if (!nf_ct_is_confirmed(ct))
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

	return nat;
}
int
nf_nat_seq_adjust(struct sk_buff *skb,
		  struct nf_conn *ct,
		  enum ip_conntrack_info ctinfo)
{
	struct tcphdr *tcph;
	int dir;
	__be32 newseq, newack;
	s16 seqoff, ackoff;
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_seq *this_way, *other_way;

	dir = CTINFO2DIR(ctinfo);

	this_way = &nat->seq[dir];
	other_way = &nat->seq[!dir];

#ifdef CONFIG_HTC_NETWORK_MODIFY
	if (IS_ERR(this_way) || (!this_way))
		printk(KERN_ERR "[NET] this_way is NULL in %s!\n", __func__);
#endif

	if (!skb_make_writable(skb, ip_hdrlen(skb) + sizeof(*tcph)))
		return 0;

	tcph = (void *)skb->data + ip_hdrlen(skb);
	if (after(ntohl(tcph->seq), this_way->correction_pos))
		seqoff = this_way->offset_after;
	else
		seqoff = this_way->offset_before;

	if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
		  other_way->correction_pos))
		ackoff = other_way->offset_after;
	else
		ackoff = other_way->offset_before;

	newseq = htonl(ntohl(tcph->seq) + seqoff);
	newack = htonl(ntohl(tcph->ack_seq) - ackoff);

	inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
	inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);

	pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
		 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
		 ntohl(newack));

	tcph->seq = newseq;
	tcph->ack_seq = newack;

	return nf_nat_sack_adjust(skb, tcph, ct, ctinfo);
}
Example #13
/* kill conntracks with affected NAT section */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
	const struct nf_nat_proto_clean *clean = data;
	struct nf_conn_nat *nat = nfct_nat(i);

	if (!nat)
		return 0;

	if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
	    (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
		return 0;

	return i->status & IPS_NAT_MASK ? 1 : 0;
}
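
The clean->l3proto and clean->l4proto fields tested above come from a small filter structure that callers fill in before iterating the conntrack table; a zero field means "match any" at that layer. A sketch of its shape, with the field names taken from the accesses in nf_nat_proto_remove():

/* Sketch: the filter passed to nf_nat_proto_remove() via 'data'.
 * A zero field matches any protocol at that layer.
 */
struct nf_nat_proto_clean {
	u8 l3proto;
	u8 l4proto;
};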
Example #14
unsigned int
nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
		       const struct nf_nat_range *range,
		       const struct net_device *out)
{
	struct nf_conn *ct;
	struct nf_conn_nat *nat;
	enum ip_conntrack_info ctinfo;
	struct nf_nat_range newrange;
	const struct rtable *rt;
	__be32 newsrc, nh;

	NF_CT_ASSERT(hooknum == NF_INET_POST_ROUTING);

	ct = nf_ct_get(skb, &ctinfo);
	nat = nfct_nat(ct);

	NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
			    ctinfo == IP_CT_RELATED_REPLY));

	/* Source address is 0.0.0.0 - locally generated packet that is
	 * probably not supposed to be masqueraded.
	 */
	if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0)
		return NF_ACCEPT;

	rt = skb_rtable(skb);
	nh = rt_nexthop(rt, ip_hdr(skb)->daddr);
	newsrc = inet_select_addr(out, nh, RT_SCOPE_UNIVERSE);
	if (!newsrc) {
		pr_info("%s ate my IP address\n", out->name);
		return NF_DROP;
	}

	nat->masq_index = out->ifindex;

	/* Transfer from original range. */
	memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
	memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
	newrange.flags       = range->flags | NF_NAT_RANGE_MAP_IPS;
	newrange.min_addr.ip = newsrc;
	newrange.max_addr.ip = newsrc;
	newrange.min_proto   = range->min_proto;
	newrange.max_proto   = range->max_proto;

	/* Hand modified range to generic setup. */
	return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
}
/* TCP sequence number adjustment.  Returns 1 on success, 0 on failure */
int
nf_nat_seq_adjust(struct sk_buff **pskb,
		  struct nf_conn *ct,
		  enum ip_conntrack_info ctinfo)
{
	struct tcphdr *tcph;
	int dir;
	__be32 newseq, newack;
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_seq *this_way, *other_way;

	dir = CTINFO2DIR(ctinfo);

	this_way = &nat->info.seq[dir];
	other_way = &nat->info.seq[!dir];

	if (!skb_make_writable(pskb, (*pskb)->nh.iph->ihl*4+sizeof(*tcph)))
		return 0;

	tcph = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;
	if (after(ntohl(tcph->seq), this_way->correction_pos))
		newseq = htonl(ntohl(tcph->seq) + this_way->offset_after);
	else
		newseq = htonl(ntohl(tcph->seq) + this_way->offset_before);

	if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
		  other_way->correction_pos))
		newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_after);
	else
		newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before);

	nf_proto_csum_replace4(&tcph->check, *pskb, tcph->seq, newseq, 0);
	nf_proto_csum_replace4(&tcph->check, *pskb, tcph->ack_seq, newack, 0);

	DEBUGP("Adjusting sequence number from %u->%u, ack from %u->%u\n",
		ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
		ntohl(newack));

	tcph->seq = newseq;
	tcph->ack_seq = newack;

	if (!nf_nat_sack_adjust(pskb, tcph, ct, ctinfo))
		return 0;

	nf_conntrack_tcp_update(*pskb, (*pskb)->nh.iph->ihl*4, ct, dir);

	return 1;
}
/* TCP SACK sequence number adjustment */
static inline unsigned int
nf_nat_sack_adjust(struct sk_buff **pskb,
		   struct tcphdr *tcph,
		   struct nf_conn *ct,
		   enum ip_conntrack_info ctinfo)
{
	unsigned int dir, optoff, optend;
	struct nf_conn_nat *nat = nfct_nat(ct);

	optoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct tcphdr);
	optend = (*pskb)->nh.iph->ihl*4 + tcph->doff*4;

	if (!skb_make_writable(pskb, optend))
		return 0;

	dir = CTINFO2DIR(ctinfo);

	while (optoff < optend) {
		/* Usually: option, length. */
		unsigned char *op = (*pskb)->data + optoff;

		switch (op[0]) {
		case TCPOPT_EOL:
			return 1;
		case TCPOPT_NOP:
			optoff++;
			continue;
		default:
			/* no partial options */
			if (optoff + 1 == optend ||
			    optoff + op[1] > optend ||
			    op[1] < 2)
				return 0;
			if (op[0] == TCPOPT_SACK &&
			    op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
			    ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
				sack_adjust(*pskb, tcph, optoff+2,
					    optoff+op[1],
					    &nat->info.seq[!dir]);
			optoff += op[1];
		}
	}
	return 1;
}
static inline int
ctnetlink_dump_nat_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nf_nat_seq *natseq;
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (!(ct->status & IPS_SEQ_ADJUST) || !nat)
		return 0;

	natseq = &nat->seq[IP_CT_DIR_ORIGINAL];
	if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_ORIG) == -1)
		return -1;

	natseq = &nat->seq[IP_CT_DIR_REPLY];
	if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_REPLY) == -1)
		return -1;

	return 0;
}
static inline unsigned int
nf_nat_sack_adjust(struct sk_buff *skb,
		   struct tcphdr *tcph,
		   struct nf_conn *ct,
		   enum ip_conntrack_info ctinfo)
{
	unsigned int dir, optoff, optend;
	struct nf_conn_nat *nat = nfct_nat(ct);

	optoff = ip_hdrlen(skb) + sizeof(struct tcphdr);
	optend = ip_hdrlen(skb) + tcph->doff * 4;

	if (!skb_make_writable(skb, optend))
		return 0;

	dir = CTINFO2DIR(ctinfo);

	while (optoff < optend) {
		/* Usually: option, length. */
		unsigned char *op = skb->data + optoff;

		switch (op[0]) {
		case TCPOPT_EOL:
			return 1;
		case TCPOPT_NOP:
			optoff++;
			continue;
		default:
			/* no partial options */
			if (optoff + 1 == optend ||
			    optoff + op[1] > optend ||
			    op[1] < 2)
				return 0;
			if (op[0] == TCPOPT_SACK &&
			    op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
			    ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
				sack_adjust(skb, tcph, optoff+2,
					    optoff+op[1], &nat->seq[!dir]);
			optoff += op[1];
		}
	}
	return 1;
}
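
Each TCP SACK option carries (left edge, right edge) pairs of 32-bit sequence numbers after the 2-byte kind/length header, which is why the loop above hands sack_adjust() the byte range optoff+2 .. optoff+op[1]. sack_adjust() itself is not shown in these examples; the following is only a hedged sketch of the per-block adjustment, with an assumed block layout, mirroring how the ACK number is shifted by the other direction's offset in nf_nat_seq_adjust():

/* Sketch only: bump one SACK block by the peer direction's recorded
 * offset. The two-edge block layout is an assumption made for the
 * illustration; offset selection per edge is omitted.
 */
struct sack_block {
	__be32 start_seq;
	__be32 end_seq;
};

static void sack_block_adjust(struct sack_block *sp, s16 offset)
{
	sp->start_seq = htonl(ntohl(sp->start_seq) - offset);
	sp->end_seq   = htonl(ntohl(sp->end_seq) - offset);
}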
s16 nf_nat_get_offset(const struct nf_conn *ct,
		      enum ip_conntrack_dir dir,
		      u32 seq)
{
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_seq *this_way;
	s16 offset;

	if (!nat)
		return 0;

	this_way = &nat->seq[dir];
	spin_lock_bh(&nf_nat_seqofs_lock);
	offset = after(seq, this_way->correction_pos)
		 ? this_way->offset_after : this_way->offset_before;
	spin_unlock_bh(&nf_nat_seqofs_lock);

	return offset;
}
/* Setup TCP sequence correction given this change at this sequence */
static inline void
adjust_tcp_sequence(u32 seq,
		    int sizediff,
		    struct nf_conn *ct,
		    enum ip_conntrack_info ctinfo)
{
	int dir;
	struct nf_nat_seq *this_way, *other_way;
	struct nf_conn_nat *nat = nfct_nat(ct);

	DEBUGP("nf_nat_resize_packet: old_size = %u, new_size = %u\n",
		(*skb)->len, new_size);

	dir = CTINFO2DIR(ctinfo);

	this_way = &nat->info.seq[dir];
	other_way = &nat->info.seq[!dir];

	DEBUGP("nf_nat_resize_packet: Seq_offset before: ");
	DUMP_OFFSET(this_way);

	spin_lock_bh(&nf_nat_seqofs_lock);

	/* SYN adjust. If it's uninitialized, or this is after last
	 * correction, record it: we don't handle more than one
	 * adjustment in the window, but do deal with common case of a
	 * retransmit */
	if (this_way->offset_before == this_way->offset_after ||
	    before(this_way->correction_pos, seq)) {
		   this_way->correction_pos = seq;
		   this_way->offset_before = this_way->offset_after;
		   this_way->offset_after += sizediff;
	}
	spin_unlock_bh(&nf_nat_seqofs_lock);

	DEBUGP("nf_nat_resize_packet: Seq_offset after: ");
	DUMP_OFFSET(this_way);
}
/* inbound packets == from PAC to PNS */
static int
pptp_inbound_pkt(struct sk_buff *skb,
		 struct nf_conn *ct,
		 enum ip_conntrack_info ctinfo,
		 struct PptpControlHeader *ctlh,
		 union pptp_ctrl_union *pptpReq)
{
	const struct nf_nat_pptp *nat_pptp_info;
	u_int16_t msg;
	__be16 new_pcid;
	unsigned int pcid_off;

	nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;
	new_pcid = nat_pptp_info->pns_call_id;

	switch (msg = ntohs(ctlh->messageType)) {
	case PPTP_OUT_CALL_REPLY:
		pcid_off = offsetof(union pptp_ctrl_union, ocack.peersCallID);
		break;
	case PPTP_IN_CALL_CONNECT:
		pcid_off = offsetof(union pptp_ctrl_union, iccon.peersCallID);
		break;
	case PPTP_IN_CALL_REQUEST:
		/* only need to nat in case PAC is behind NAT box */
		return NF_ACCEPT;
	case PPTP_WAN_ERROR_NOTIFY:
		pcid_off = offsetof(union pptp_ctrl_union, wanerr.peersCallID);
		break;
	case PPTP_CALL_DISCONNECT_NOTIFY:
		pcid_off = offsetof(union pptp_ctrl_union, disc.callID);
		break;
	case PPTP_SET_LINK_INFO:
		pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID);
		break;
	default:
		pr_debug("unknown inbound packet %s\n",
			 msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
					       pptp_msg_name[0]);
		/* fall through */
	case PPTP_START_SESSION_REQUEST:
	case PPTP_START_SESSION_REPLY:
	case PPTP_STOP_SESSION_REQUEST:
	case PPTP_STOP_SESSION_REPLY:
	case PPTP_ECHO_REQUEST:
	case PPTP_ECHO_REPLY:
		/* no need to alter packet */
		return NF_ACCEPT;
	}

	/* only OUT_CALL_REPLY, IN_CALL_CONNECT, IN_CALL_REQUEST,
	 * WAN_ERROR_NOTIFY, CALL_DISCONNECT_NOTIFY pass down here */

	/* mangle packet */
	pr_debug("altering peer call id from 0x%04x to 0x%04x\n",
		 ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid));

	if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
				     pcid_off + sizeof(struct pptp_pkt_hdr) +
				     sizeof(struct PptpControlHeader),
				     sizeof(new_pcid), (char *)&new_pcid,
				     sizeof(new_pcid)) == 0)
		return NF_DROP;
	return NF_ACCEPT;
}
/* outbound packets == from PNS to PAC */
static int
pptp_outbound_pkt(struct sk_buff *skb,
		  struct nf_conn *ct,
		  enum ip_conntrack_info ctinfo,
		  struct PptpControlHeader *ctlh,
		  union pptp_ctrl_union *pptpReq)

{
	struct nf_ct_pptp_master *ct_pptp_info;
	struct nf_nat_pptp *nat_pptp_info;
	u_int16_t msg;
	__be16 new_callid;
	unsigned int cid_off;

	ct_pptp_info  = &nfct_help(ct)->help.ct_pptp_info;
	nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;

	new_callid = ct_pptp_info->pns_call_id;

	switch (msg = ntohs(ctlh->messageType)) {
	case PPTP_OUT_CALL_REQUEST:
		cid_off = offsetof(union pptp_ctrl_union, ocreq.callID);
		/* FIXME: ideally we would want to reserve a call ID
		 * here.  current netfilter NAT core is not able to do
		 * this :( For now we use TCP source port. This breaks
		 * multiple calls within one control session */

		/* save original call ID in nat_info */
		nat_pptp_info->pns_call_id = ct_pptp_info->pns_call_id;

		/* don't use tcph->source since we are at a DSTmanip
		 * hook (e.g. PREROUTING) and pkt is not mangled yet */
		new_callid = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.tcp.port;

		/* save new call ID in ct info */
		ct_pptp_info->pns_call_id = new_callid;
		break;
	case PPTP_IN_CALL_REPLY:
		cid_off = offsetof(union pptp_ctrl_union, icack.callID);
		break;
	case PPTP_CALL_CLEAR_REQUEST:
		cid_off = offsetof(union pptp_ctrl_union, clrreq.callID);
		break;
	default:
		pr_debug("unknown outbound packet 0x%04x:%s\n", msg,
			 msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
					       pptp_msg_name[0]);
		/* fall through */
	case PPTP_SET_LINK_INFO:
		/* only need to NAT in case PAC is behind NAT box */
	case PPTP_START_SESSION_REQUEST:
	case PPTP_START_SESSION_REPLY:
	case PPTP_STOP_SESSION_REQUEST:
	case PPTP_STOP_SESSION_REPLY:
	case PPTP_ECHO_REQUEST:
	case PPTP_ECHO_REPLY:
		/* no need to alter packet */
		return NF_ACCEPT;
	}

	/* only OUT_CALL_REQUEST, IN_CALL_REPLY, CALL_CLEAR_REQUEST pass
	 * down to here */
	pr_debug("altering call id from 0x%04x to 0x%04x\n",
		 ntohs(REQ_CID(pptpReq, cid_off)), ntohs(new_callid));

	/* mangle packet */
	if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
				     cid_off + sizeof(struct pptp_pkt_hdr) +
				     sizeof(struct PptpControlHeader),
				     sizeof(new_callid), (char *)&new_callid,
				     sizeof(new_callid)) == 0)
		return NF_DROP;
	return NF_ACCEPT;
}
/*
 * Look at outgoing ftp packets to catch the response to a PASV command
 * from the server (inside-to-outside).
 * When we see one, we build a connection entry with the client address,
 * client port 0 (unknown at the moment), the server address and the
 * server port.  Mark the current connection entry as a control channel
 * of the new entry. All this work is done just so that the data connection
 * can be scheduled to the right server later.
 *
 * The outgoing packet should be something like
 *   "227 Entering Passive Mode (xxx,xxx,xxx,xxx,ppp,ppp)".
 * xxx,xxx,xxx,xxx is the server address, ppp,ppp is the server port number.
 */
static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
			 struct sk_buff *skb, int *diff)
{
	struct iphdr *iph;
	struct tcphdr *th;
	char *data, *data_limit;
	char *start, *end;
	union nf_inet_addr from;
	__be16 port;
	struct ip_vs_conn *n_cp;
	char buf[24];		/* xxx.xxx.xxx.xxx,ppp,ppp\000 */
	unsigned buf_len;
	int ret = 0;
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	struct net *net;

#ifdef CONFIG_IP_VS_IPV6
	/* This application helper doesn't work with IPv6 yet,
	 * so turn this into a no-op for IPv6 packets
	 */
	if (cp->af == AF_INET6)
		return 1;
#endif

	*diff = 0;

	/* Only useful for established sessions */
	if (cp->state != IP_VS_TCP_S_ESTABLISHED)
		return 1;

	/* Linear packets are much easier to deal with. */
	if (!skb_make_writable(skb, skb->len))
		return 0;

	if (cp->app_data == &ip_vs_ftp_pasv) {
		iph = ip_hdr(skb);
		th = (struct tcphdr *)&(((char *)iph)[iph->ihl*4]);
		data = (char *)th + (th->doff << 2);
		data_limit = skb_tail_pointer(skb);

		if (ip_vs_ftp_get_addrport(data, data_limit,
					   SERVER_STRING,
					   sizeof(SERVER_STRING)-1,
					   '(', ')',
					   &from.ip, &port,
					   &start, &end) != 1)
			return 1;

		IP_VS_DBG(7, "PASV response (%pI4:%d) -> %pI4:%d detected\n",
			  &from.ip, ntohs(port), &cp->caddr.ip, 0);

		/*
		 * Now update or create a connection entry for it
		 */
		{
			struct ip_vs_conn_param p;
			ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET,
					      iph->protocol, &from, port,
					      &cp->caddr, 0, &p);
			n_cp = ip_vs_conn_out_get(&p);
		}
		if (!n_cp) {
			struct ip_vs_conn_param p;
			ip_vs_conn_fill_param(ip_vs_conn_net(cp),
					      AF_INET, IPPROTO_TCP, &cp->caddr,
					      0, &cp->vaddr, port, &p);
			n_cp = ip_vs_conn_new(&p, &from, port,
					      IP_VS_CONN_F_NO_CPORT |
					      IP_VS_CONN_F_NFCT,
					      cp->dest, skb->mark);
			if (!n_cp)
				return 0;

			/* add its controller */
			ip_vs_control_add(n_cp, cp);
		}

		/*
		 * Replace the old passive address with the new one
		 */
		from.ip = n_cp->vaddr.ip;
		port = n_cp->vport;
		snprintf(buf, sizeof(buf), "%u,%u,%u,%u,%u,%u",
			 ((unsigned char *)&from.ip)[0],
			 ((unsigned char *)&from.ip)[1],
			 ((unsigned char *)&from.ip)[2],
			 ((unsigned char *)&from.ip)[3],
			 ntohs(port) >> 8,
			 ntohs(port) & 0xFF);

		buf_len = strlen(buf);

		ct = nf_ct_get(skb, &ctinfo);
		if (ct && !nf_ct_is_untracked(ct) && nfct_nat(ct)) {
			/* If mangling fails this function will return 0
			 * which will cause the packet to be dropped.
			 * Mangling can only fail under memory pressure,
			 * hopefully it will succeed on the retransmitted
			 * packet.
			 */
			ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
						       start-data, end-start,
						       buf, buf_len);
			if (ret) {
				ip_vs_nfct_expect_related(skb, ct, n_cp,
							  IPPROTO_TCP, 0, 0);
				if (skb->ip_summed == CHECKSUM_COMPLETE)
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				/* csum is updated */
				ret = 1;
			}
		}

		/*
		 * Not setting 'diff' is intentional, otherwise the sequence
		 * would be adjusted twice.
		 */

		net = skb_net(skb);
		cp->app_data = NULL;
		ip_vs_tcp_conn_listen(net, n_cp);
		ip_vs_conn_put(n_cp);
		return ret;
	}
	return 1;
}
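
The buffer built by the snprintf() above uses the standard FTP PASV encoding: the four octets of the IPv4 address followed by the TCP port split into its high and low bytes, all comma-separated. A small standalone sketch of that encoding (pasv_encode is a hypothetical helper, not part of IPVS):

/* Sketch: format an address/port pair as "h1,h2,h3,h4,p1,p2" the way
 * the PASV reply above does. 'addr' is in network byte order, 'port'
 * in host byte order.
 */
static int pasv_encode(char *buf, size_t len, __be32 addr, unsigned short port)
{
	const unsigned char *a = (const unsigned char *)&addr;

	return snprintf(buf, len, "%u,%u,%u,%u,%u,%u",
			a[0], a[1], a[2], a[3], port >> 8, port & 0xFF);
}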
unsigned int
nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
	       const struct nf_hook_state *state,
	       unsigned int (*do_chain)(void *priv,
					struct sk_buff *skb,
					const struct nf_hook_state *state,
					struct nf_conn *ct))
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conn_nat *nat;
	/* maniptype == SRC for postrouting. */
	enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);

	ct = nf_ct_get(skb, &ctinfo);
	/* Can't track?  It's not due to stress, or conntrack would
	 * have dropped it.  Hence it's the user's responsibility to
	 * packet filter it out, or implement conntrack/NAT for that
	 * protocol. 8) --RR
	 */
	if (!ct)
		return NF_ACCEPT;

	nat = nfct_nat(ct);

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   state->hook))
				return NF_DROP;
			else
				return NF_ACCEPT;
		}
		/* Only ICMPs can be IP_CT_IS_REPLY: */
		/* fall through */
	case IP_CT_NEW:
		/* Seen it before?  This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			unsigned int ret;

			ret = do_chain(priv, skb, state, ct);
			if (ret != NF_ACCEPT)
				return ret;

			if (nf_nat_initialized(ct, HOOK2MANIP(state->hook)))
				break;

			ret = nf_nat_alloc_null_binding(ct, state->hook);
			if (ret != NF_ACCEPT)
				return ret;
		} else {
			pr_debug("Already setup manip %s for ct %p\n",
				 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
				 ct);
			if (nf_nat_oif_changed(state->hook, ctinfo, nat,
					       state->out))
				goto oif_changed;
		}
		break;

	default:
		/* ESTABLISHED */
		WARN_ON(ctinfo != IP_CT_ESTABLISHED &&
			ctinfo != IP_CT_ESTABLISHED_REPLY);
		if (nf_nat_oif_changed(state->hook, ctinfo, nat, state->out))
			goto oif_changed;
	}

	return nf_nat_packet(ct, ctinfo, state->hook, skb);

oif_changed:
	nf_ct_kill_acct(ct, ctinfo, skb);
	return NF_DROP;
}
Example #25
static unsigned int
nf_nat_fn(unsigned int hooknum,
	  struct sk_buff *skb,
	  const struct net_device *in,
	  const struct net_device *out,
	  int (*okfn)(struct sk_buff *))
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conn_nat *nat;
	/* maniptype == SRC for postrouting. */
	enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);

	/* We never see fragments: conntrack defrags on pre-routing
	   and local-out, and nf_nat_out protects post-routing. */
	NF_CT_ASSERT(!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)));

	ct = nf_ct_get(skb, &ctinfo);
	/* Can't track?  It's not due to stress, or conntrack would
	   have dropped it.  Hence it's the user's responsibility to
	   packet filter it out, or implement conntrack/NAT for that
	   protocol. 8) --RR */
	if (!ct) {
		/* Exception: ICMP redirect to new connection (not in
		   hash table yet).  We must not let this through, in
		   case we're doing NAT to the same network. */
		if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			struct icmphdr _hdr, *hp;

			hp = skb_header_pointer(skb, ip_hdrlen(skb),
						sizeof(_hdr), &_hdr);
			if (hp != NULL &&
			    hp->type == ICMP_REDIRECT)
				return NF_DROP;
		}
		return NF_ACCEPT;
	}

	/* Don't try to NAT if this packet is not conntracked */
	if (ct == &nf_conntrack_untracked)
		return NF_ACCEPT;

	nat = nfct_nat(ct);
	if (!nat)
		return NF_ACCEPT;

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED+IP_CT_IS_REPLY:
		if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(ct, ctinfo,
							   hooknum, skb))
				return NF_DROP;
			else
				return NF_ACCEPT;
		}
		/* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
	case IP_CT_NEW:

		/* Seen it before?  This can happen for loopback, retrans,
		   or local packets.. */
		if (!nf_nat_initialized(ct, maniptype)) {
			unsigned int ret;

			if (unlikely(nf_ct_is_confirmed(ct)))
				/* NAT module was loaded late */
				ret = alloc_null_binding_confirmed(ct, hooknum);
			else if (hooknum == NF_IP_LOCAL_IN)
				/* LOCAL_IN hook doesn't have a chain!  */
				ret = alloc_null_binding(ct, hooknum);
			else
				ret = nf_nat_rule_find(skb, hooknum, in, out,
						       ct);

			if (ret != NF_ACCEPT) {
				return ret;
			}

			ipt_cone_place_in_hashes(ct);
			
		} else
			DEBUGP("Already setup manip %s for ct %p\n",
			       maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST",
			       ct);
		break;

	default:
		/* ESTABLISHED */
		NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
			     ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY));
	}

	return nf_nat_packet(ct, ctinfo, hooknum, skb);
}
Example #26
static unsigned int
conenat_tg(struct sk_buff *skb, const struct xt_target_param *par)
{
	struct net *net;
	struct nf_conn *ct;
	struct nf_conn_nat *nat;
	enum ip_conntrack_info ctinfo;
	struct nf_nat_range newrange;
	const struct nf_nat_multi_range_compat *mr;
	struct rtable *rt;
	__be32 newsrc;

	NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING);

	ct = nf_ct_get(skb, &ctinfo);
	nat = nfct_nat(ct);
	net = nf_ct_net(ct);

	NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED
			    || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));

	/* Source address is 0.0.0.0 - locally generated packet that is
	 * probably not supposed to be masqueraded.
	 */
	if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0)
		return NF_ACCEPT;

	mr = par->targinfo;
	rt = skb->rtable;
	newsrc = inet_select_addr(par->out, rt->rt_gateway, RT_SCOPE_UNIVERSE);
	if (!newsrc) {
		printk("CONENAT: %s ate my IP address\n", par->out->name);
		return NF_DROP;
	}

	write_lock_bh(&conenat_lock);
	nat->masq_index = par->out->ifindex;
	write_unlock_bh(&conenat_lock);

	if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum == IPPROTO_UDP)
	{
		unsigned int ret, expectcount = net->ct.expect_count;
		u_int16_t minport, maxport;
		u_int16_t newport, tmpport;
		struct nf_conntrack_expect *exp = NULL;
		struct nf_conntrack_tuple tuple;
		struct nf_conn_help *help = nfct_help(ct);

		/* Choose port */
		spin_lock_bh(&nf_conntrack_lock);

#if 0
		exp = LIST_FIND(&nf_conntrack_expect_list,
				exp_src_cmp,
				struct nf_conntrack_expect *,
				&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
#endif

		memset(&tuple, 0, sizeof(tuple));

		/* src */
		tuple.src.l3num = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num;
		tuple.src.u3.ip = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip;
		tuple.src.u.udp.port = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u.udp.port;

		/* dst */
		tuple.dst.u3.ip = newsrc;
		/* tuple.dst.u.udp.port = htons(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port); */
		newport = htons(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port);
		tuple.dst.protonum = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;

		pr_debug("tuple1 = %pI4:%hu\n",
			 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip,
			 ntohs(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port));

		if (expectcount > 0) {
			for (tmpport = 0;
			     (tmpport <= expectcount) && (newport <= 65535);
			     tmpport++, newport++) {
				tuple.dst.u.udp.port = newport;
				exp = __nf_ct_expect_find_bysave(net, &tuple,
								 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
				if (exp)
					break;
			}
		}

		if (exp) {
			minport = maxport = exp->tuple.dst.u.udp.port;
			pr_debug("existing mapped port = %hu\n", ntohs(minport));
		} else {
			minport = mr->range[0].min.udp.port == 0 ?
				ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port :
				mr->range[0].min.udp.port;

			maxport = mr->range[0].max.udp.port == 0 ?
				htons(65535) :
				mr->range[0].max.udp.port;

			for (newport = ntohs(minport), tmpport = ntohs(maxport);
			     newport <= tmpport; newport++) {
#if 0
				exp = LIST_FIND(&ip_conntrack_expect_list,
						exp_cmp,
						struct nf_conntrack_expect *,
						newsrc, htons(newport),
						ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum);
#endif
				/* dst */
				tuple.dst.u.udp.port = htons(newport);

				exp = __nf_ct_expect_find(net, &tuple);
				if (!exp) {
					pr_debug("new mapping: %pI4:%hu -> %pI4:%hu\n",
						 &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip,
						 ntohs(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u.udp.port),
						 &newsrc, newport);
					minport = maxport = htons(newport);
					break;
				}
			}
		}
		spin_unlock_bh(&nf_conntrack_lock);

		newrange.flags = mr->range[0].flags | IP_NAT_RANGE_MAP_IPS |IP_NAT_RANGE_PROTO_SPECIFIED;
		newrange.min_ip = newrange.max_ip = newsrc;
		newrange.min.udp.port = minport;
		newrange.max.udp.port = maxport;

		/* Set ct helper */
		ret = nf_nat_setup_info(ct, &newrange, IP_NAT_MANIP_SRC);
		if (ret == NF_ACCEPT) {
			rcu_read_lock();
			if (help == NULL) {
				help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
				if (help == NULL)
					return NF_ACCEPT;
			} else {
				memset(&help->help, 0, sizeof(help->help));
			}
			rcu_assign_pointer(help->helper,
					   &nf_conntrack_helper_cone_nat);
			rcu_read_unlock();

			pr_debug("helper setup, skb=%p\n", skb);
		}

		return ret;
	}
static void pptp_nat_expected(struct nf_conn *ct,
			      struct nf_conntrack_expect *exp)
{
	const struct nf_conn *master = ct->master;
	struct nf_conntrack_expect *other_exp;
	struct nf_conntrack_tuple t;
	const struct nf_ct_pptp_master *ct_pptp_info;
	const struct nf_nat_pptp *nat_pptp_info;
	struct nf_nat_range range;

	ct_pptp_info = &nfct_help(master)->help.ct_pptp_info;
	nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info;

	/* And here goes the grand finale of corrosion... */
	if (exp->dir == IP_CT_DIR_ORIGINAL) {
		pr_debug("we are PNS->PAC\n");
		/* therefore, build tuple for PAC->PNS */
		t.src.l3num = AF_INET;
		t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip;
		t.src.u.gre.key = ct_pptp_info->pac_call_id;
		t.dst.u3.ip = master->tuplehash[!exp->dir].tuple.dst.u3.ip;
		t.dst.u.gre.key = ct_pptp_info->pns_call_id;
		t.dst.protonum = IPPROTO_GRE;
	} else {
		pr_debug("we are PAC->PNS\n");
		/* build tuple for PNS->PAC */
		t.src.l3num = AF_INET;
		t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip;
		t.src.u.gre.key = nat_pptp_info->pns_call_id;
		t.dst.u3.ip = master->tuplehash[!exp->dir].tuple.dst.u3.ip;
		t.dst.u.gre.key = nat_pptp_info->pac_call_id;
		t.dst.protonum = IPPROTO_GRE;
	}

	pr_debug("trying to unexpect other dir: ");
	nf_ct_dump_tuple_ip(&t);
	other_exp = nf_ct_expect_find_get(&t);
	if (other_exp) {
		nf_ct_unexpect_related(other_exp);
		nf_ct_expect_put(other_exp);
		pr_debug("success\n");
	} else {
		pr_debug("not found!\n");
	}

	/* This must be a fresh one. */
	BUG_ON(ct->status & IPS_NAT_DONE_MASK);

	/* Change src to where master sends to */
	range.flags = IP_NAT_RANGE_MAP_IPS;
	range.min_ip = range.max_ip
		= ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
	if (exp->dir == IP_CT_DIR_ORIGINAL) {
		range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
		range.min = range.max = exp->saved_proto;
	}
	nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);

	/* For DST manip, map port here to where it's expected. */
	range.flags = IP_NAT_RANGE_MAP_IPS;
	range.min_ip = range.max_ip
		= ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
	if (exp->dir == IP_CT_DIR_REPLY) {
		range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
		range.min = range.max = exp->saved_proto;
	}
	nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
}
static unsigned int
nf_nat_ipv6_fn(unsigned int hooknum,
	       struct sk_buff *skb,
	       const struct net_device *in,
	       const struct net_device *out,
	       int (*okfn)(struct sk_buff *))
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conn_nat *nat;
	enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
	__be16 frag_off;
	int hdrlen;
	u8 nexthdr;

	ct = nf_ct_get(skb, &ctinfo);
	/* Can't track?  It's not due to stress, or conntrack would
	 * have dropped it.  Hence it's the user's responsibility to
	 * packet filter it out, or implement conntrack/NAT for that
	 * protocol. 8) --RR
	 */
	if (!ct)
		return NF_ACCEPT;

	/* Don't try to NAT if this packet is not conntracked */
	if (nf_ct_is_untracked(ct))
		return NF_ACCEPT;

	nat = nfct_nat(ct);
	if (!nat) {
		/* NAT module was loaded late. */
		if (nf_ct_is_confirmed(ct))
			return NF_ACCEPT;
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
		if (nat == NULL) {
			pr_debug("failed to add NAT extension\n");
			return NF_ACCEPT;
		}
	}

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		nexthdr = ipv6_hdr(skb)->nexthdr;
		hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
					  &nexthdr, &frag_off);

		if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
			if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
							     hooknum, hdrlen))
				return NF_DROP;
			else
				return NF_ACCEPT;
		}
		/* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
	case IP_CT_NEW:
		/* Seen it before?  This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			unsigned int ret;

			ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
			if (ret != NF_ACCEPT)
				return ret;
		} else {
			pr_debug("Already setup manip %s for ct %p\n",
				 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
				 ct);
			if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
				goto oif_changed;
		}
		break;

	default:
		/* ESTABLISHED */
		NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
			     ctinfo == IP_CT_ESTABLISHED_REPLY);
		if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
			goto oif_changed;
	}

	return nf_nat_packet(ct, ctinfo, hooknum, skb);

oif_changed:
	nf_ct_kill_acct(ct, ctinfo, skb);
	return NF_DROP;
}
static unsigned int
nf_nat_ipv4_fn(const struct nf_hook_ops *ops,
	       struct sk_buff *skb,
	       const struct net_device *in,
	       const struct net_device *out,
	       int (*okfn)(struct sk_buff *))
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conn_nat *nat;
	/* maniptype == SRC for postrouting. */
	enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);

	/* We never see fragments: conntrack defrags on pre-routing
	 * and local-out, and nf_nat_out protects post-routing.
	 */
	NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));

	ct = nf_ct_get(skb, &ctinfo);
	/* Can't track?  It's not due to stress, or conntrack would
	 * have dropped it.  Hence it's the user's responsibility to
	 * packet filter it out, or implement conntrack/NAT for that
	 * protocol. 8) --RR
	 */
	if (!ct)
		return NF_ACCEPT;

	/* Don't try to NAT if this packet is not conntracked */
	if (nf_ct_is_untracked(ct))
		return NF_ACCEPT;

	nat = nfct_nat(ct);
	if (!nat) {
		/* NAT module was loaded late. */
		if (nf_ct_is_confirmed(ct))
			return NF_ACCEPT;
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
		if (nat == NULL) {
			pr_debug("failed to add NAT extension\n");
			return NF_ACCEPT;
		}
	}

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   ops->hooknum))
				return NF_DROP;
			else
				return NF_ACCEPT;
		}
		/* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
	case IP_CT_NEW:
		/* Seen it before?  This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			unsigned int ret;

			ret = nf_nat_rule_find(skb, ops->hooknum, in, out, ct);
			if (ret != NF_ACCEPT)
				return ret;
		} else {
			pr_debug("Already setup manip %s for ct %p\n",
				 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
				 ct);
			if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
				goto oif_changed;
		}
		break;

	default:
		/* ESTABLISHED */
		NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
			     ctinfo == IP_CT_ESTABLISHED_REPLY);
		if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
			goto oif_changed;
	}

	return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);

oif_changed:
	nf_ct_kill_acct(ct, ctinfo, skb);
	return NF_DROP;
}
static unsigned int
nf_nat_fn(unsigned int hooknum,
	  struct sk_buff *skb,
	  const struct net_device *in,
	  const struct net_device *out,
	  int (*okfn)(struct sk_buff *))
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conn_nat *nat;
	/* maniptype == SRC for postrouting. */
	enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);

	/* We never see fragments: conntrack defrags on pre-routing
	   and local-out, and nf_nat_out protects post-routing. */
	NF_CT_ASSERT(!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)));

	ct = nf_ct_get(skb, &ctinfo);
	/* Can't track?  It's not due to stress, or conntrack would
	   have dropped it.  Hence it's the user's responsibility to
	   packet filter it out, or implement conntrack/NAT for that
	   protocol. 8) --RR */
	if (!ct)
		return NF_ACCEPT;

	/* Don't try to NAT if this packet is not conntracked */
	if (ct == &nf_conntrack_untracked)
		return NF_ACCEPT;

	nat = nfct_nat(ct);
	if (!nat) {
		/* NAT module was loaded late. */
		if (nf_ct_is_confirmed(ct))
			return NF_ACCEPT;
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
		if (nat == NULL) {
			pr_debug("failed to add NAT extension\n");
			return NF_ACCEPT;
		}
	}

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED+IP_CT_IS_REPLY:
		if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(ct, ctinfo,
							   hooknum, skb))
				return NF_DROP;
			else
				return NF_ACCEPT;
		}
		/* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
	case IP_CT_NEW:

		/* Seen it before?  This can happen for loopback, retrans,
		   or local packets.. */
		if (!nf_nat_initialized(ct, maniptype)) {
			unsigned int ret;

			if (hooknum == NF_INET_LOCAL_IN)
				/* LOCAL_IN hook doesn't have a chain!  */
				ret = alloc_null_binding(ct, hooknum);
			else
				ret = nf_nat_rule_find(skb, hooknum, in, out,
						       ct);

			if (ret != NF_ACCEPT) {
				return ret;
			}
		} else
			pr_debug("Already setup manip %s for ct %p\n",
				 maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST",
				 ct);
		break;

	default:
		/* ESTABLISHED */
		NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
			     ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY));
	}

	return nf_nat_packet(ct, ctinfo, hooknum, skb);
}