Example no. 1
0
/*
 * Syn-proxy step 2 logic
 * Receive the client's ACK packet (the third step of the 3-way handshake),
 * do the cookie check, and then send a SYN to the real server (rs) after
 * creating a session.
 *
 */
int
ip_vs_synproxy_ack_rcv(int af, struct sk_buff *skb, struct tcphdr *th,
		       struct ip_vs_protocol *pp, struct ip_vs_conn **cpp,
		       struct ip_vs_iphdr *iph, int *verdict)
{
	struct ip_vs_synproxy_opt opt;
	struct ip_vs_service *svc;
	int res_cookie_check;

	/*
	 * Don't check svc syn-proxy flag, as it may
	 * be changed after syn-proxy step 1.
	 */
	if (!th->syn && th->ack && !th->rst && !th->fin &&
	    (svc =
	     ip_vs_service_get(af, skb->mark, iph->protocol, &iph->daddr,
			       th->dest))) {
		if (ip_vs_todrop()) {
			/*
			 * It seems that we are very loaded.
			 * We have to drop this packet :(
			 */
			ip_vs_service_put(svc);
			*verdict = NF_DROP;
			return 0;
		}

		if (sysctl_ip_vs_synproxy_defer &&
		    !syn_proxy_ack_has_data(skb, iph, th)) {
			/* update statistics */
			IP_VS_INC_ESTATS(ip_vs_esmib, SYNPROXY_NULL_ACK);
			/*
			 * We expect an ack packet carrying payload but
			 * received a pure ack, so we have to drop it.
			 */
			ip_vs_service_put(svc);
			*verdict = NF_DROP;
			return 0;
		}

		/*
		 * Important: set the tcp header before the cookie check, as it
		 * will be used in the cookie_check funcs.
		 */
		skb_set_transport_header(skb, iph->len);
#ifdef CONFIG_IP_VS_IPV6
		if (af == AF_INET6) {
			res_cookie_check = ip_vs_synproxy_v6_cookie_check(skb,
						ntohl(th->ack_seq) - 1, &opt);
		} else
#endif
		{
			res_cookie_check = ip_vs_synproxy_v4_cookie_check(skb,
						ntohl(th->ack_seq) - 1, &opt);
		}

		if (!res_cookie_check) {
			/* update statistics */
			IP_VS_INC_ESTATS(ip_vs_esmib, SYNPROXY_BAD_ACK);
			/*
			 * Cookie check failed, drop the packet.
			 */
			IP_VS_DBG(6, "syn_cookie check failed seq=%u\n",
				  ntohl(th->ack_seq) - 1);
			ip_vs_service_put(svc);
			*verdict = NF_DROP;
			return 0;
		}

		/* update statistics */
		IP_VS_INC_ESTATS(ip_vs_esmib, SYNPROXY_OK_ACK);

		/*
		 * Let the virtual server select a real server for the
		 * incoming connection, and create a connection entry.
		 */
		*cpp = ip_vs_schedule(svc, skb, 1);
		if (!*cpp) {
			IP_VS_DBG(6, "ip_vs_schedule failed\n");
			*verdict = ip_vs_leave(svc, skb, pp);
			return 0;
		}

		/*
		 * Release service, we don't need it any more.
		 */
		ip_vs_service_put(svc);

		/*
		 * Do nothing but print an error msg on failure, because
		 * the session will be correctly freed in ip_vs_conn_expire.
		 */
		if (!syn_proxy_send_rs_syn(af, th, *cpp, skb, pp, &opt)) {
			IP_VS_ERR_RL("syn_proxy_send_rs_syn failed!\n");
		}

		/* count in the ack packet (STOLEN by synproxy) */
		ip_vs_in_stats(*cpp, skb);

		/*
		 * Activate the session timer, and dec the refcnt.
		 * The skb is also stolen, so let the caller return immediately.
		 */
		ip_vs_conn_put(*cpp);
		*verdict = NF_STOLEN;
		return 0;
	}

	return 1;
}
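
A minimal userspace sketch of why the code above passes ntohl(th->ack_seq) - 1 to the cookie-check functions: the proxy answered the client's SYN with a SYN+ACK whose sequence number was the cookie, so the handshake-completing ACK acknowledges cookie + 1. The toy make_cookie()/check_cookie() helpers below are stand-ins for ip_vs_synproxy_v4_cookie_check(), not the kernel's algorithm.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* toy hash, only for illustration */
static uint32_t make_cookie(uint32_t saddr, uint16_t sport)
{
	return (saddr * 2654435761u) ^ sport;
}

static int check_cookie(uint32_t saddr, uint16_t sport, uint32_t cookie)
{
	return cookie == make_cookie(saddr, sport);
}

int main(void)
{
	uint32_t saddr = 0x0a000001;	/* 10.0.0.1, illustrative client */
	uint16_t sport = 40000;
	uint32_t cookie = make_cookie(saddr, sport);

	/* what the client sends back in the ACK that completes the handshake */
	uint32_t ack_seq_wire = htonl(cookie + 1);

	/* what ip_vs_synproxy_ack_rcv() reconstructs before the check */
	uint32_t recovered = ntohl(ack_seq_wire) - 1;

	printf("cookie valid: %d\n", check_cookie(saddr, sport, recovered));
	return 0;
}
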
Example no. 2
0
/*
 *	Check if it's for virtual services, look it up,
 *	and send it on its way...
 */
static unsigned int
ip_vs_in(unsigned int hooknum, struct sk_buff **pskb,
	 const struct net_device *in, const struct net_device *out,
	 int (*okfn)(struct sk_buff *))
{
	struct sk_buff	*skb = *pskb;
	struct iphdr	*iph;
	struct ip_vs_protocol *pp;
	struct ip_vs_conn *cp;
	int ret, restart;
	int ihl;

	/*
	 *	Big tappo: only PACKET_HOST (neither loopback nor mcasts)
	 *	... don't know why 1st test DOES NOT include 2nd (?)
	 */
	if (unlikely(skb->pkt_type != PACKET_HOST
		     || skb->dev == &loopback_dev || skb->sk)) {
		IP_VS_DBG(12, "packet type=%d proto=%d daddr=%d.%d.%d.%d ignored\n",
			  skb->pkt_type,
			  ip_hdr(skb)->protocol,
			  NIPQUAD(ip_hdr(skb)->daddr));
		return NF_ACCEPT;
	}

	iph = ip_hdr(skb);
	if (unlikely(iph->protocol == IPPROTO_ICMP)) {
		int related, verdict = ip_vs_in_icmp(pskb, &related, hooknum);

		if (related)
			return verdict;
		skb = *pskb;
		iph = ip_hdr(skb);
	}

	/* Protocol supported? */
	pp = ip_vs_proto_get(iph->protocol);
	if (unlikely(!pp))
		return NF_ACCEPT;

	ihl = iph->ihl << 2;

	/*
	 * Check if the packet belongs to an existing connection entry
	 */
	cp = pp->conn_in_get(skb, pp, iph, ihl, 0);

	if (unlikely(!cp)) {
		int v;

		if (!pp->conn_schedule(skb, pp, &v, &cp))
			return v;
	}

	if (unlikely(!cp)) {
		/* sorry, all this trouble for a no-hit :) */
		IP_VS_DBG_PKT(12, pp, skb, 0,
			      "packet continues traversal as normal");
		return NF_ACCEPT;
	}

	IP_VS_DBG_PKT(11, pp, skb, 0, "Incoming packet");

	/* Check the server status */
	if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
		/* the destination server is not available */

		if (sysctl_ip_vs_expire_nodest_conn) {
			/* try to expire the connection immediately */
			ip_vs_conn_expire_now(cp);
		}
		/* don't restart its timer, and silently
		   drop the packet. */
		__ip_vs_conn_put(cp);
		return NF_DROP;
	}

	ip_vs_in_stats(cp, skb);
	restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp);
	if (cp->packet_xmit)
		ret = cp->packet_xmit(skb, cp, pp);
		/* do not touch skb anymore */
	else {
		IP_VS_DBG_RL("warning: packet_xmit is null");
		ret = NF_ACCEPT;
	}

	/* increase its packet counter and check if it is needed
	   to be synchronized */
	atomic_inc(&cp->in_pkts);
	if ((ip_vs_sync_state & IP_VS_STATE_MASTER) &&
	    (cp->protocol != IPPROTO_TCP ||
	     cp->state == IP_VS_TCP_S_ESTABLISHED) &&
	    (atomic_read(&cp->in_pkts) % sysctl_ip_vs_sync_threshold[1]
	     == sysctl_ip_vs_sync_threshold[0]))
		ip_vs_sync_conn(cp);

	ip_vs_conn_put(cp);
	return ret;
}
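
The sync throttle at the end of ip_vs_in() above only pushes a connection to the backup daemon when its packet counter hits threshold[0] modulo threshold[1]. A small standalone sketch of that test; the values 3 and 50 mirror the usual sysctl defaults but are assumptions here, not taken from the example.

#include <stdio.h>

int main(void)
{
	int sync_threshold[2] = { 3, 50 };	/* assumed sysctl defaults */
	int in_pkts;

	for (in_pkts = 1; in_pkts <= 120; in_pkts++) {
		/* same test as the ip_vs_sync_conn() condition above */
		if (in_pkts % sync_threshold[1] == sync_threshold[0])
			printf("packet %3d: connection would be synced\n",
			       in_pkts);
	}
	return 0;	/* prints for packets 3, 53 and 103 */
}
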
Example no. 3
0
/*
 * Create a SYN packet and send it to the real server (rs).
 * ATTENTION: we also store the SYN skb in cp if SYN retransmission
 * is turned on.
 */
static int
syn_proxy_send_rs_syn(int af, const struct tcphdr *th,
		      struct ip_vs_conn *cp, struct sk_buff *skb,
		      struct ip_vs_protocol *pp, struct ip_vs_synproxy_opt *opt)
{
	struct sk_buff *syn_skb;
	int tcp_hdr_size;
	__u8 tcp_flags = TCPCB_FLAG_SYN;
	unsigned int tcphoff;
	struct tcphdr *new_th;

	if (!cp->packet_xmit) {
		IP_VS_ERR_RL("warning: packet_xmit is null");
		return 0;
	}

	syn_skb = alloc_skb(MAX_TCP_HEADER + 15, GFP_ATOMIC);
	if (unlikely(syn_skb == NULL)) {
		IP_VS_ERR_RL("alloc skb failed when sending rs syn packet\n");
		return 0;
	}

	/* Reserve space for headers */
	skb_reserve(syn_skb, MAX_TCP_HEADER);
	tcp_hdr_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
			(opt->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
			(opt->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
			/* SACK_PERM is in the place of NOP NOP of TS */
			((opt->sack_ok
			  && !opt->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));

	new_th = (struct tcphdr *)skb_push(syn_skb, tcp_hdr_size);
	/* Compose tcp header */
	skb_reset_transport_header(syn_skb);
	syn_skb->csum = 0;

	/* Set tcp hdr */
	new_th->source = th->source;
	new_th->dest = th->dest;
	new_th->seq = htonl(ntohl(th->seq) - 1);
	new_th->ack_seq = 0;
	*(((__u16 *) new_th) + 6) =
	    htons(((tcp_hdr_size >> 2) << 12) | tcp_flags);
	/* FIX_ME: what window should we use */
	new_th->window = htons(5000);
	new_th->check = 0;
	new_th->urg_ptr = 0;
	new_th->urg = 0;
	new_th->ece = 0;
	new_th->cwr = 0;

	syn_proxy_syn_build_options((__be32 *) (new_th + 1), opt);

	/*
	 * Set ip hdr
	 * Attention: set source and dest addr to the ack skb's;
	 * we rely on the packet_xmit func to do the NAT work.
	 */
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6) {
		struct ipv6hdr *ack_iph = ipv6_hdr(skb);
		struct ipv6hdr *iph =
		    (struct ipv6hdr *)skb_push(syn_skb, sizeof(struct ipv6hdr));

		tcphoff = sizeof(struct ipv6hdr);
		skb_reset_network_header(syn_skb);
		memcpy(&iph->saddr, &ack_iph->saddr, sizeof(struct in6_addr));
		memcpy(&iph->daddr, &ack_iph->daddr, sizeof(struct in6_addr));

		iph->version = 6;
		iph->nexthdr = NEXTHDR_TCP;
		iph->payload_len = htons(tcp_hdr_size);
		iph->hop_limit = IPV6_DEFAULT_HOPLIMIT;

		new_th->check = 0;
		syn_skb->csum =
		    skb_checksum(syn_skb, tcphoff, syn_skb->len - tcphoff, 0);
		new_th->check =
		    csum_ipv6_magic(&iph->saddr, &iph->daddr,
				    syn_skb->len - tcphoff, IPPROTO_TCP,
				    syn_skb->csum);
	} else
#endif
	{
		struct iphdr *ack_iph = ip_hdr(skb);
		u32 rtos = RT_TOS(ack_iph->tos);
		struct iphdr *iph =
		    (struct iphdr *)skb_push(syn_skb, sizeof(struct iphdr));

		tcphoff = sizeof(struct iphdr);
		skb_reset_network_header(syn_skb);
		*((__u16 *) iph) = htons((4 << 12) | (5 << 8) | (rtos & 0xff));
		iph->tot_len = htons(syn_skb->len);
		iph->frag_off = htons(IP_DF);
		/* FIX_ME: what ttl should we use */
		iph->ttl = IPDEFTTL;
		iph->protocol = IPPROTO_TCP;
		iph->saddr = ack_iph->saddr;
		iph->daddr = ack_iph->daddr;

		ip_send_check(iph);

		new_th->check = 0;
		syn_skb->csum =
		    skb_checksum(syn_skb, tcphoff, syn_skb->len - tcphoff, 0);
		new_th->check =
		    csum_tcpudp_magic(iph->saddr, iph->daddr,
				      syn_skb->len - tcphoff, IPPROTO_TCP,
				      syn_skb->csum);
	}

	/* Save syn_skb if syn retransmission is on  */
	if (sysctl_ip_vs_synproxy_syn_retry > 0) {
		cp->syn_skb = skb_copy(syn_skb, GFP_ATOMIC);
		atomic_set(&cp->syn_retry_max, sysctl_ip_vs_synproxy_syn_retry);
	}

	/* Save info for fast_response_xmit */
	if(sysctl_ip_vs_fast_xmit && skb->dev &&
				likely(skb->dev->type == ARPHRD_ETHER) &&
				skb_mac_header_was_set(skb)) {
		struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);

		if(likely(cp->indev == NULL)) {
			cp->indev = skb->dev;
			dev_hold(cp->indev);
		}

		if (unlikely(cp->indev != skb->dev)) {
			dev_put(cp->indev);
			cp->indev = skb->dev;
			dev_hold(cp->indev);
		}

		memcpy(cp->src_hwaddr, eth->h_source, ETH_ALEN);
		memcpy(cp->dst_hwaddr, eth->h_dest, ETH_ALEN);
		IP_VS_INC_ESTATS(ip_vs_esmib, FAST_XMIT_SYNPROXY_SAVE);
		IP_VS_DBG_RL("syn_proxy_send_rs_syn netdevice:%s\n",
						netdev_name(skb->dev));
	}

	/* count in the syn packet */
	ip_vs_in_stats(cp, skb);

	/* If xmit failed, syn_skb will be freed correctly. */
	cp->packet_xmit(syn_skb, cp, pp);

	return 1;
}
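
The single assignment "*(((__u16 *) new_th) + 6) = htons(((tcp_hdr_size >> 2) << 12) | tcp_flags)" above fills bytes 12-13 of the TCP header in one go: the data offset (header length in 32-bit words) in the top four bits and the flag bits in the low byte. The sketch below packs and unpacks the same word; the SYN bit value is an assumption matching TCPCB_FLAG_SYN.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_SYN 0x02	/* assumed value of the SYN flag bit */

int main(void)
{
	int tcp_hdr_size = 28;		/* 20-byte header plus 8 bytes of options */
	uint16_t word = htons(((tcp_hdr_size >> 2) << 12) | FLAG_SYN);

	uint16_t host = ntohs(word);
	printf("doff  = %u words (%u bytes)\n", host >> 12, (host >> 12) * 4);
	printf("flags = 0x%02x, SYN set: %d\n", host & 0xff, !!(host & FLAG_SYN));
	return 0;
}
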
Example no. 4
0
/*
 *  Pass or drop the packet.
 *  Called by ip_vs_in, when the virtual service is available but
 *  no destination is available for a new connection.
 */
int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
		struct ip_vs_protocol *pp)
{
	__be16 _ports[2], *pptr;
	struct iphdr *iph = ip_hdr(skb);

	pptr = skb_header_pointer(skb, iph->ihl*4,
				  sizeof(_ports), _ports);
	if (pptr == NULL) {
		ip_vs_service_put(svc);
		return NF_DROP;
	}

	/* if it is fwmark-based service, the cache_bypass sysctl is up
	   and the destination is RTN_UNICAST (and not local), then create
	   a cache_bypass connection entry */
	if (sysctl_ip_vs_cache_bypass && svc->fwmark
	    && (inet_addr_type(iph->daddr) == RTN_UNICAST)) {
		int ret, cs;
		struct ip_vs_conn *cp;

		ip_vs_service_put(svc);

		/* create a new connection entry */
		IP_VS_DBG(6, "ip_vs_leave: create a cache_bypass entry\n");
		cp = ip_vs_conn_new(iph->protocol,
				    iph->saddr, pptr[0],
				    iph->daddr, pptr[1],
				    0, 0,
				    IP_VS_CONN_F_BYPASS,
				    NULL);
		if (cp == NULL)
			return NF_DROP;

		/* statistics */
		ip_vs_in_stats(cp, skb);

		/* set state */
		cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp);

		/* transmit the first SYN packet */
		ret = cp->packet_xmit(skb, cp, pp);
		/* do not touch skb anymore */

		atomic_inc(&cp->in_pkts);
		ip_vs_conn_put(cp);
		return ret;
	}

	/*
	 * When the virtual ftp service is presented, packets destined
	 * for other services on the VIP may get here (except services
	 * listed in the ipvs table); pass them along, because it is
	 * not ipvs's job to decide to drop the packets.
	 */
	if ((svc->port == FTPPORT) && (pptr[1] != FTPPORT)) {
		ip_vs_service_put(svc);
		return NF_ACCEPT;
	}

	ip_vs_service_put(svc);

	/*
	 * Notify the client that the destination is unreachable, and
	 * release the socket buffer.
	 * Since this is in the IP layer, the TCP socket is not actually
	 * created and a TCP RST packet cannot be sent; instead,
	 * ICMP_PORT_UNREACH is sent here whether it is TCP or UDP. --WZ
	 */
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
	return NF_DROP;
}
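
The skb_header_pointer() call at the top of ip_vs_leave() copies out the first two 16-bit fields of the transport header, i.e. the source and destination port for both TCP and UDP, located iph->ihl * 4 bytes into the packet. A self-contained sketch of that extraction over a hand-built buffer; only the fields the demo needs are filled in.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char pkt[28] = { 0 };	/* 20-byte IPv4 header + fake L4 header */
	uint16_t ports[2];

	pkt[0] = 0x45;			/* version 4, ihl 5 (20 bytes) */
	ports[0] = htons(40000);	/* source port */
	ports[1] = htons(21);		/* destination port (FTP) */
	memcpy(pkt + 20, ports, sizeof(ports));

	unsigned int ihl = (pkt[0] & 0x0f) * 4;	/* same as iph->ihl * 4 */
	uint16_t sport, dport;
	memcpy(&sport, pkt + ihl, 2);
	memcpy(&dport, pkt + ihl + 2, 2);

	printf("sport=%u dport=%u\n", ntohs(sport), ntohs(dport));
	return 0;
}
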
Example no. 5
0
/*
 *	Handle ICMP messages in the outside-to-inside direction (incoming).
 *	Find any that might be relevant, check against existing connections,
 *	forward to the right destination host if relevant.
 *	Currently handles error types - unreachable, quench, ttl exceeded.
 */
static int
ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum)
{
	struct sk_buff *skb = *pskb;
	struct iphdr *iph;
	struct icmphdr	_icmph, *ic;
	struct iphdr	_ciph, *cih;	/* The ip header contained within the ICMP */
	struct ip_vs_conn *cp;
	struct ip_vs_protocol *pp;
	unsigned int offset, ihl, verdict;

	*related = 1;

	/* reassemble IP fragments */
	if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
		skb = ip_vs_gather_frags(skb,
					 hooknum == NF_IP_LOCAL_IN ?
					 IP_DEFRAG_VS_IN : IP_DEFRAG_VS_FWD);
		if (!skb)
			return NF_STOLEN;
		*pskb = skb;
	}

	iph = ip_hdr(skb);
	offset = ihl = iph->ihl * 4;
	ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
	if (ic == NULL)
		return NF_DROP;

	IP_VS_DBG(12, "Incoming ICMP (%d,%d) %u.%u.%u.%u->%u.%u.%u.%u\n",
		  ic->type, ntohs(icmp_id(ic)),
		  NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));

	/*
	 * Work through seeing if this is for us.
	 * These checks are supposed to be in an order that means easy
	 * things are checked first to speed up processing.... however
	 * this means that some packets will manage to get a long way
	 * down this stack and then be rejected, but that's life.
	 */
	if ((ic->type != ICMP_DEST_UNREACH) &&
	    (ic->type != ICMP_SOURCE_QUENCH) &&
	    (ic->type != ICMP_TIME_EXCEEDED)) {
		*related = 0;
		return NF_ACCEPT;
	}

	/* Now find the contained IP header */
	offset += sizeof(_icmph);
	cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
	if (cih == NULL)
		return NF_ACCEPT; /* The packet looks wrong, ignore */

	pp = ip_vs_proto_get(cih->protocol);
	if (!pp)
		return NF_ACCEPT;

	/* Is the embedded protocol header present? */
	if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
		     pp->dont_defrag))
		return NF_ACCEPT;

	IP_VS_DBG_PKT(11, pp, skb, offset, "Checking incoming ICMP for");

	offset += cih->ihl * 4;

	/* The embedded headers contain source and dest in reverse order */
	cp = pp->conn_in_get(skb, pp, cih, offset, 1);
	if (!cp)
		return NF_ACCEPT;

	verdict = NF_DROP;

	/* Ensure the checksum is correct */
	if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
		/* Failed checksum! */
		IP_VS_DBG(1, "Incoming ICMP: failed checksum from %d.%d.%d.%d!\n",
			  NIPQUAD(iph->saddr));
		goto out;
	}

	/* do the statistics and put it back */
	ip_vs_in_stats(cp, skb);
	if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol)
		offset += 2 * sizeof(__u16);
	verdict = ip_vs_icmp_xmit(skb, cp, pp, offset);
	/* do not touch skb anymore */

  out:
	__ip_vs_conn_put(cp);

	return verdict;
}
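
The function above walks the ICMP error packet in three hops before handing the offset to ip_vs_icmp_xmit(): outer IP header, ICMP header, embedded IP header, then past the two embedded ports. A trivial sketch that redoes the offset arithmetic for the common 20-byte/8-byte/20-byte case; the sizes are assumptions for illustration.

#include <stdio.h>

int main(void)
{
	unsigned int outer_ihl = 20;	/* iph->ihl * 4 */
	unsigned int icmp_len  = 8;	/* sizeof(struct icmphdr) */
	unsigned int inner_ihl = 20;	/* cih->ihl * 4 */
	unsigned int offset;

	offset = outer_ihl;		/* start of the ICMP header */
	offset += icmp_len;		/* start of the embedded IP header */
	printf("embedded IP header at offset %u\n", offset);

	offset += inner_ihl;		/* start of the embedded TCP/UDP header */
	printf("embedded transport header at offset %u\n", offset);

	offset += 2 * 2;		/* skip the two 16-bit ports, as above */
	printf("offset handed to the xmit routine: %u\n", offset);
	return 0;
}
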
Example no. 6
0
/*
 *	Handle ICMP messages in the outside-to-inside direction (incoming)
 *	and sometimes in outgoing direction from ip_vs_forward_icmp.
 *	Find any that might be relevant, check against existing connections,
 *	forward to the right destination host if relevant.
 *	Currently handles error types - unreachable, quench, ttl exceeded.
 */
static int ip_vs_in_icmp(struct sk_buff **skb_p)
{
	struct sk_buff	*skb   = *skb_p;
	struct iphdr    *iph;
	struct icmphdr  *icmph;
	struct iphdr    *ciph;	/* The ip header contained within the ICMP */
	__u16	        *pptr;	/* port numbers from TCP/UDP contained header */
	unsigned short   len;
	unsigned short	clen, csize;
	struct ip_vs_conn *cp;
	struct rtable *rt;			/* Route to the other host */
	int    mtu;

	if (skb_is_nonlinear(skb)) {
		if (skb_linearize(skb, GFP_ATOMIC) != 0)
			return NF_DROP;
	}

	iph = skb->nh.iph;
	ip_send_check(iph);
	icmph = (struct icmphdr *)((char *)iph + (iph->ihl << 2));
	len = ntohs(iph->tot_len) - (iph->ihl<<2);
	if (len < sizeof(struct icmphdr))
		return NF_DROP;

	IP_VS_DBG(12, "icmp in (%d,%d) %u.%u.%u.%u -> %u.%u.%u.%u\n",
		  icmph->type, ntohs(icmp_id(icmph)),
		  NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));

	if ((icmph->type != ICMP_DEST_UNREACH) &&
	    (icmph->type != ICMP_SOURCE_QUENCH) &&
	    (icmph->type != ICMP_TIME_EXCEEDED))
		return NF_ACCEPT;

	/*
	 * If we get here we have an ICMP error of one of the above 3 types
	 * Now find the contained IP header
	 */
	clen = len - sizeof(struct icmphdr);
	if (clen < sizeof(struct iphdr))
		return NF_DROP;
	ciph = (struct iphdr *) (icmph + 1);
	csize = ciph->ihl << 2;
	if (clen < csize)
		return NF_DROP;

	/* We are only interested in ICMPs generated from TCP or UDP packets */
	if (ciph->protocol != IPPROTO_UDP && ciph->protocol != IPPROTO_TCP)
		return NF_ACCEPT;

	/* Skip non-first embedded TCP/UDP fragments */
	if (ciph->frag_off & __constant_htons(IP_OFFSET))
		return NF_ACCEPT;

	/* We need at least TCP/UDP ports here */
	if (clen < csize + sizeof(struct udphdr))
		return NF_DROP;

	/* Ensure the checksum is correct */
	if (ip_compute_csum((unsigned char *) icmph, len)) {
		/* Failed checksum! */
		IP_VS_ERR_RL("incoming ICMP: failed checksum from "
			     "%d.%d.%d.%d!\n", NIPQUAD(iph->saddr));
		return NF_DROP;
	}

	pptr = (__u16 *)&(((char *)ciph)[csize]);

	IP_VS_DBG(11, "Handling incoming ICMP for "
		  "%u.%u.%u.%u:%d -> %u.%u.%u.%u:%d\n",
		  NIPQUAD(ciph->saddr), ntohs(pptr[0]),
		  NIPQUAD(ciph->daddr), ntohs(pptr[1]));

	/* This is pretty much what ip_vs_conn_in_get() does,
	   except parameters are in the reverse order */
	cp = ip_vs_conn_in_get(ciph->protocol,
			       ciph->daddr, pptr[1],
			       ciph->saddr, pptr[0]);
	if (cp == NULL)
		return NF_ACCEPT;

	ip_vs_in_stats(cp, skb);

	/* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
	   forwarded directly here, because there is no need to
	   translate address/port back */
	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
		int ret;
		if (cp->packet_xmit)
			ret = cp->packet_xmit(skb, cp);
		else
			ret = NF_ACCEPT;
		atomic_inc(&cp->in_pkts);
		ip_vs_conn_put(cp);
		return ret;
	}

	/*
	 * mangle and send the packet here
	 */
	if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(iph->tos))))
		goto tx_error_icmp;

	/* MTU checking */
	mtu = rt->u.dst.pmtu;
	if ((skb->len > mtu) && (iph->frag_off&__constant_htons(IP_DF))) {
		ip_rt_put(rt);
		icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
		IP_VS_DBG_RL("ip_vs_in_icmp(): frag needed\n");
		goto tx_error;
	}

	/* drop old route */
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	/* copy-on-write the packet before mangling it */
	if (ip_vs_skb_cow(skb, rt->u.dst.dev->hard_header_len,
			  &iph, (unsigned char**)&icmph)) {
		ip_vs_conn_put(cp);
		return NF_DROP;
	}
	ciph = (struct iphdr *) (icmph + 1);
	pptr = (__u16 *)&(((char *)ciph)[csize]);

	/* The ICMP packet for VS/NAT must be written to correct addresses
	   before being forwarded to the right server */

	/* First change the dest IP address, and recalc checksum */
	iph->daddr = cp->daddr;
	ip_send_check(iph);

	/* Now change the *source* address in the contained IP */
	ciph->saddr = cp->daddr;
	ip_send_check(ciph);

	/* the TCP/UDP source port - cannot redo check */
	pptr[0] = cp->dport;

	/* And finally the ICMP checksum */
	icmph->checksum = 0;
	icmph->checksum = ip_compute_csum((unsigned char *) icmph, len);
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	IP_VS_DBG(11, "Forwarding incoming ICMP to "
		  "%u.%u.%u.%u:%d -> %u.%u.%u.%u:%d\n",
		  NIPQUAD(ciph->saddr), ntohs(pptr[0]),
		  NIPQUAD(ciph->daddr), ntohs(pptr[1]));

#ifdef CONFIG_NETFILTER_DEBUG
	skb->nf_debug = 1 << NF_IP_LOCAL_OUT;
#endif /* CONFIG_NETFILTER_DEBUG */
	ip_send(skb);
	ip_vs_conn_put(cp);
	return NF_STOLEN;

  tx_error_icmp:
	dst_link_failure(skb);
  tx_error:
	dev_kfree_skb(skb);
	ip_vs_conn_put(cp);
	return NF_STOLEN;
}
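
The NAT branch above zeroes icmph->checksum and recomputes it with ip_compute_csum() after rewriting the embedded address and port. Below is a generic RFC 1071 one's-complement checksum with the same "zero the field, checksum the whole message, store the result" pattern; it is a plain reimplementation for illustration, not the kernel's optimized routine.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t inet_csum(const void *data, size_t len)
{
	const uint8_t *p = data;
	uint32_t sum = 0;

	while (len > 1) {
		sum += (p[0] << 8) | p[1];	/* 16-bit words, network order */
		p += 2;
		len -= 2;
	}
	if (len)
		sum += p[0] << 8;		/* odd trailing byte */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return htons((uint16_t)~sum);		/* ready to store in the packet */
}

int main(void)
{
	/* ICMP echo request header (checksum zeroed) plus 4 bytes of payload */
	uint8_t icmp[12] = { 8, 0, 0, 0, 0x12, 0x34, 0x00, 0x01, 'p', 'i', 'n', 'g' };

	uint16_t csum = inet_csum(icmp, sizeof(icmp));
	memcpy(icmp + 2, &csum, 2);		/* write the checksum field */

	/* re-checksumming the complete message must now yield zero */
	printf("verify = 0x%04x\n", ntohs(inet_csum(icmp, sizeof(icmp))));
	return 0;
}
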
Example no. 7
0
/*
 *  Pass or drop the packet.
 *  Called by ip_vs_in, when the virtual service is available but
 *  no destination is available for a new connection.
 */
static int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb)
{
	struct iphdr *iph = skb->nh.iph;
	__u16 *portp = (__u16 *)&(((char *)iph)[iph->ihl*4]);

	/* if it is fwmark-based service, the cache_bypass sysctl is up
	   and the destination is RTN_UNICAST (and not local), then create
	   a cache_bypass connection entry */
	if (sysctl_ip_vs_cache_bypass && svc->fwmark
	    && (inet_addr_type(iph->daddr) == RTN_UNICAST)) {
		int ret;
		struct ip_vs_conn *cp;

		ip_vs_service_put(svc);

		/* create a new connection entry */
		IP_VS_DBG(6, "ip_vs_leave: create a cache_bypass entry\n");
		cp = ip_vs_conn_new(iph->protocol,
				    iph->saddr, portp[0],
				    iph->daddr, portp[1],
				    0, 0,
				    IP_VS_CONN_F_BYPASS,
				    NULL);
		if (cp == NULL) {
			kfree_skb(skb);
			return NF_STOLEN;
		}

		/* statistics */
		ip_vs_in_stats(cp, skb);

		/* set state */
		ip_vs_set_state(cp, VS_STATE_INPUT, iph, portp);

		/* transmit the first SYN packet */
		ret = cp->packet_xmit(skb, cp);

		atomic_inc(&cp->in_pkts);
		ip_vs_conn_put(cp);
		return ret;
	}

	/*
	 * When the virtual ftp service is presented, packets destined
	 * for other services on the VIP may get here (except services
	 * listed in the ipvs table); pass them along, because it is
	 * not ipvs's job to decide to drop the packets.
	 */
	if ((svc->port == FTPPORT) && (portp[1] != FTPPORT)) {
		ip_vs_service_put(svc);
		return NF_ACCEPT;
	}

	ip_vs_service_put(svc);

	/*
	 * Notify the client that the destination is unreachable, and
	 * release the socket buffer.
	 * Since this is in the IP layer, the TCP socket is not actually
	 * created and a TCP RST packet cannot be sent; instead,
	 * ICMP_PORT_UNREACH is sent here whether it is TCP or UDP. --WZ
	 */
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
	kfree_skb(skb);
	return NF_STOLEN;
}
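
A condensed sketch of the decision the two ip_vs_leave() variants above make when a virtual service has no usable destination; FTPPORT is 21, and the struct and helper below are illustrative stand-ins, not kernel definitions.

#include <stdio.h>

#define FTPPORT 21

enum leave_action { LEAVE_BYPASS, LEAVE_ACCEPT, LEAVE_REJECT_ICMP };

struct leave_input {
	int cache_bypass_sysctl;	/* sysctl_ip_vs_cache_bypass */
	int svc_fwmark;			/* non-zero for fwmark-based services */
	int dst_is_unicast;		/* inet_addr_type() == RTN_UNICAST */
	int svc_port;			/* virtual service port */
	int dst_port;			/* destination port taken from the packet */
};

static enum leave_action leave_decision(const struct leave_input *in)
{
	if (in->cache_bypass_sysctl && in->svc_fwmark && in->dst_is_unicast)
		return LEAVE_BYPASS;	/* forward through a cache_bypass entry */
	if (in->svc_port == FTPPORT && in->dst_port != FTPPORT)
		return LEAVE_ACCEPT;	/* not ipvs's job to drop it */
	return LEAVE_REJECT_ICMP;	/* ICMP_PORT_UNREACH, then drop/steal */
}

int main(void)
{
	struct leave_input ftp_other = { 0, 0, 0, FTPPORT, 8080 };

	printf("ftp vip, other port -> action %d (1 = accept)\n",
	       (int)leave_decision(&ftp_other));
	return 0;
}
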
Example no. 8
0
/*
 *	Check if it's for virtual services, look it up,
 *	and send it on its way...
 */
static unsigned int ip_vs_in(unsigned int hooknum,
			     struct sk_buff **skb_p,
			     const struct net_device *in,
			     const struct net_device *out,
			     int (*okfn)(struct sk_buff *))
{
	struct sk_buff	*skb = *skb_p;
	struct iphdr	*iph = skb->nh.iph;
	union ip_vs_tphdr h;
	struct ip_vs_conn *cp;
	struct ip_vs_service *svc;
	int ihl;
	int ret;

	/*
	 *	Big tappo: only PACKET_HOST (neither loopback nor mcasts)
	 *	... don't know why 1st test DOES NOT include 2nd (?)
	 */
	if (skb->pkt_type != PACKET_HOST || skb->dev == &loopback_dev) {
		IP_VS_DBG(12, "packet type=%d proto=%d daddr=%d.%d.%d.%d ignored\n",
			  skb->pkt_type,
			  iph->protocol,
			  NIPQUAD(iph->daddr));
		return NF_ACCEPT;
	}

	if (iph->protocol == IPPROTO_ICMP)
		return ip_vs_in_icmp(skb_p);

	/* let it go if other IP protocols */
	if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
		return NF_ACCEPT;

	/* make sure that the protocol header is available in the skb data
	   area; note that the skb data area may be reallocated. */
	ihl = iph->ihl << 2;
	if (ip_vs_header_check(skb, iph->protocol, ihl) == -1)
		return NF_DROP;
	iph = skb->nh.iph;
	h.raw = (char*) iph + ihl;

	/*
	 * Check if the packet belongs to an existing connection entry
	 */
	cp = ip_vs_conn_in_get(iph->protocol, iph->saddr, h.portp[0],
			       iph->daddr, h.portp[1]);

	if (!cp &&
	    (h.th->syn || (iph->protocol!=IPPROTO_TCP)) &&
	    (svc = ip_vs_service_get(skb->nfmark, iph->protocol,
				     iph->daddr, h.portp[1]))) {
		if (ip_vs_todrop()) {
			/*
			 * It seems that we are very loaded.
			 * We have to drop this packet :(
			 */
			ip_vs_service_put(svc);
			return NF_DROP;
		}

		/*
		 * Let the virtual server select a real server for the
		 * incoming connection, and create a connection entry.
		 */
		cp = ip_vs_schedule(svc, iph);
		if (!cp)
			return ip_vs_leave(svc, skb);
		ip_vs_conn_stats(cp, svc);
		ip_vs_service_put(svc);
	}

	if (!cp) {
		/* sorry, all this trouble for a no-hit :) */
		IP_VS_DBG(12, "packet for %s %d.%d.%d.%d:%d continue "
			  "traversal as normal.\n",
			  ip_vs_proto_name(iph->protocol),
			  NIPQUAD(iph->daddr),
			  ntohs(h.portp[1]));
		return NF_ACCEPT;
	}

	IP_VS_DBG(11, "Incoming %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d\n",
		  ip_vs_proto_name(iph->protocol),
		  NIPQUAD(iph->saddr), ntohs(h.portp[0]),
		  NIPQUAD(iph->daddr), ntohs(h.portp[1]));

	/* Check the server status */
	if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
		/* the destination server is not available */

		if (sysctl_ip_vs_expire_nodest_conn) {
			/* try to expire the connection immediately */
			ip_vs_conn_expire_now(cp);
		} else {
			/* don't restart its timer, and silently
			   drop the packet. */
			__ip_vs_conn_put(cp);
		}
		return NF_DROP;
	}

	ip_vs_in_stats(cp, skb);
	ip_vs_set_state(cp, VS_STATE_INPUT, iph, h.portp);
	if (cp->packet_xmit)
		ret = cp->packet_xmit(skb, cp);
	else {
		IP_VS_DBG_RL("warning: packet_xmit is null");
		ret = NF_ACCEPT;
	}

	/* increase its packet counter and check if it is needed
	   to be synchronized */
	atomic_inc(&cp->in_pkts);
	if (ip_vs_sync_state & IP_VS_STATE_MASTER &&
	    (cp->protocol != IPPROTO_TCP ||
	     cp->state == IP_VS_S_ESTABLISHED) &&
	    (atomic_read(&cp->in_pkts) % 50 == sysctl_ip_vs_sync_threshold))
		ip_vs_sync_conn(cp);

	ip_vs_conn_put(cp);
	return ret;
}
static struct ip_vs_conn *
ip_vs_sched_persist(struct ip_vs_service *svc,
		    struct sk_buff *skb,
		    __be16 src_port, __be16 dst_port, int *ignored)
{
	struct ip_vs_conn *cp = NULL;
	struct ip_vs_iphdr iph;
	struct ip_vs_dest *dest;
	struct ip_vs_conn *ct;
	__be16 dport = 0;		
	unsigned int flags;
	struct ip_vs_conn_param param;
	const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
	union nf_inet_addr snet;	

	ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);

	
#ifdef CONFIG_IP_VS_IPV6
	if (svc->af == AF_INET6)
		ipv6_addr_prefix(&snet.in6, &iph.saddr.in6, svc->netmask);
	else
#endif
		snet.ip = iph.saddr.ip & svc->netmask;

	IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
		      "mnet %s\n",
		      IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(src_port),
		      IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(dst_port),
		      IP_VS_DBG_ADDR(svc->af, &snet));

	{
		int protocol = iph.protocol;
		const union nf_inet_addr *vaddr = &iph.daddr;
		__be16 vport = 0;

		if (dst_port == svc->port) {
			if (svc->port != FTPPORT)
				vport = dst_port;
		} else {
			if (svc->fwmark) {
				protocol = IPPROTO_IP;
				vaddr = &fwmark;
			}
		}
		
		if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
						  vaddr, vport, &param) < 0) {
			*ignored = -1;
			return NULL;
		}
	}

	
	ct = ip_vs_ct_in_get(&param);
	if (!ct || !ip_vs_check_template(ct)) {
		dest = svc->scheduler->schedule(svc, skb);
		if (!dest) {
			IP_VS_DBG(1, "p-schedule: no dest found.\n");
			kfree(param.pe_data);
			*ignored = 0;
			return NULL;
		}

		if (dst_port == svc->port && svc->port != FTPPORT)
			dport = dest->port;

		ct = ip_vs_conn_new(&param, &dest->addr, dport,
				    IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
		if (ct == NULL) {
			kfree(param.pe_data);
			*ignored = -1;
			return NULL;
		}

		ct->timeout = svc->timeout;
	} else {
		
		dest = ct->dest;
		kfree(param.pe_data);
	}

	dport = dst_port;
	if (dport == svc->port && dest->port)
		dport = dest->port;

	flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
		 && iph.protocol == IPPROTO_UDP)?
		IP_VS_CONN_F_ONE_PACKET : 0;

	ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, &iph.saddr,
			      src_port, &iph.daddr, dst_port, &param);

	cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest, skb->mark);
	if (cp == NULL) {
		ip_vs_conn_put(ct);
		*ignored = -1;
		return NULL;
	}

	ip_vs_control_add(cp, ct);
	ip_vs_conn_put(ct);

	ip_vs_conn_stats(cp, svc);
	return cp;
}


struct ip_vs_conn *
ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
	       struct ip_vs_proto_data *pd, int *ignored)
{
	struct ip_vs_protocol *pp = pd->pp;
	struct ip_vs_conn *cp = NULL;
	struct ip_vs_iphdr iph;
	struct ip_vs_dest *dest;
	__be16 _ports[2], *pptr;
	unsigned int flags;

	*ignored = 1;
	ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
	pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
	if (pptr == NULL)
		return NULL;

	if (pptr[0] == FTPDATA) {
		IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
			      "Not scheduling FTPDATA");
		return NULL;
	}

	if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
	    (cp = pp->conn_in_get(svc->af, skb, &iph, iph.len, 1))) {
		IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
			      "Not scheduling reply for existing connection");
		__ip_vs_conn_put(cp);
		return NULL;
	}

	if (svc->flags & IP_VS_SVC_F_PERSISTENT)
		return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored);

	*ignored = 0;

	if (!svc->fwmark && pptr[1] != svc->port) {
		if (!svc->port)
			pr_err("Schedule: port zero only supported "
			       "in persistent services, "
			       "check your ipvs configuration\n");
		return NULL;
	}

	dest = svc->scheduler->schedule(svc, skb);
	if (dest == NULL) {
		IP_VS_DBG(1, "Schedule: no dest found.\n");
		return NULL;
	}

	flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
		 && iph.protocol == IPPROTO_UDP)?
		IP_VS_CONN_F_ONE_PACKET : 0;

	{
		struct ip_vs_conn_param p;

		ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol,
				      &iph.saddr, pptr[0], &iph.daddr, pptr[1],
				      &p);
		cp = ip_vs_conn_new(&p, &dest->addr,
				    dest->port ? dest->port : pptr[1],
				    flags, dest, skb->mark);
		if (!cp) {
			*ignored = -1;
			return NULL;
		}
	}

	IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
		      "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
		      ip_vs_fwd_tag(cp),
		      IP_VS_DBG_ADDR(svc->af, &cp->caddr), ntohs(cp->cport),
		      IP_VS_DBG_ADDR(svc->af, &cp->vaddr), ntohs(cp->vport),
		      IP_VS_DBG_ADDR(svc->af, &cp->daddr), ntohs(cp->dport),
		      cp->flags, atomic_read(&cp->refcnt));

	ip_vs_conn_stats(cp, svc);
	return cp;
}


int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
		struct ip_vs_proto_data *pd)
{
	__be16 _ports[2], *pptr;
	struct ip_vs_iphdr iph;
#ifdef CONFIG_SYSCTL
	struct net *net;
	struct netns_ipvs *ipvs;
	int unicast;
#endif

	ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);

	pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
	if (pptr == NULL) {
		ip_vs_service_put(svc);
		return NF_DROP;
	}

#ifdef CONFIG_SYSCTL
	net = skb_net(skb);

#ifdef CONFIG_IP_VS_IPV6
	if (svc->af == AF_INET6)
		unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST;
	else
#endif
		unicast = (inet_addr_type(net, iph.daddr.ip) == RTN_UNICAST);

	ipvs = net_ipvs(net);
	if (ipvs->sysctl_cache_bypass && svc->fwmark && unicast) {
		int ret;
		struct ip_vs_conn *cp;
		unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
				      iph.protocol == IPPROTO_UDP)?
				      IP_VS_CONN_F_ONE_PACKET : 0;
		union nf_inet_addr daddr =  { .all = { 0, 0, 0, 0 } };

		ip_vs_service_put(svc);

		
		IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
		{
			struct ip_vs_conn_param p;
			ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol,
					      &iph.saddr, pptr[0],
					      &iph.daddr, pptr[1], &p);
			cp = ip_vs_conn_new(&p, &daddr, 0,
					    IP_VS_CONN_F_BYPASS | flags,
					    NULL, skb->mark);
			if (!cp)
				return NF_DROP;
		}

		
		ip_vs_in_stats(cp, skb);

		
		ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);

		
		ret = cp->packet_xmit(skb, cp, pd->pp);
		

		atomic_inc(&cp->in_pkts);
		ip_vs_conn_put(cp);
		return ret;
	}
Example no. 10
0
/*
 *  IPVS persistent scheduling function
 *  It creates a connection entry according to its template if one exists,
 *  or selects a server and creates a connection entry plus a template.
 *  Locking: we are svc user (svc->refcnt), so we hold all dests too
 *  Protocols supported: TCP, UDP
 */
static struct ip_vs_conn *
ip_vs_sched_persist(struct ip_vs_service *svc,
		    struct sk_buff *skb, __be16 src_port, __be16 dst_port,
		    int *ignored, struct ip_vs_iphdr *iph)
{
	struct ip_vs_conn *cp = NULL;
	struct ip_vs_dest *dest;
	struct ip_vs_conn *ct;
	__be16 dport = 0;		/* destination port to forward */
	unsigned int flags;
	struct ip_vs_conn_param param;
	const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
	union nf_inet_addr snet;	/* source network of the client,
					   after masking */

	/* Mask saddr with the netmask to adjust template granularity */
#ifdef CONFIG_IP_VS_IPV6
	if (svc->af == AF_INET6)
		ipv6_addr_prefix(&snet.in6, &iph->saddr.in6,
				 (__force __u32) svc->netmask);
	else
#endif
		snet.ip = iph->saddr.ip & svc->netmask;

	IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
		      "mnet %s\n",
		      IP_VS_DBG_ADDR(svc->af, &iph->saddr), ntohs(src_port),
		      IP_VS_DBG_ADDR(svc->af, &iph->daddr), ntohs(dst_port),
		      IP_VS_DBG_ADDR(svc->af, &snet));

	/*
	 * As far as we know, FTP is a very complicated network protocol, and
 * it uses a control connection and data connections. For active FTP, the
 * FTP server initiates the data connection to the client; its source port
 * is often 20. For passive FTP, the FTP server tells the client the port
 * that it passively listens on, and the client issues the data
 * connection. In the tunneling or direct routing mode, the load balancer
 * is on the client-to-server half of the connection, and the port
	 * number is unknown to the load balancer. So, a conn template like
	 * <caddr, 0, vaddr, 0, daddr, 0> is created for persistent FTP
	 * service, and a template like <caddr, 0, vaddr, vport, daddr, dport>
	 * is created for other persistent services.
	 */
	{
		int protocol = iph->protocol;
		const union nf_inet_addr *vaddr = &iph->daddr;
		__be16 vport = 0;

		if (dst_port == svc->port) {
			/* non-FTP template:
			 * <protocol, caddr, 0, vaddr, vport, daddr, dport>
			 * FTP template:
			 * <protocol, caddr, 0, vaddr, 0, daddr, 0>
			 */
			if (svc->port != FTPPORT)
				vport = dst_port;
		} else {
			/* Note: persistent fwmark-based services and
			 * persistent port zero service are handled here.
			 * fwmark template:
			 * <IPPROTO_IP,caddr,0,fwmark,0,daddr,0>
			 * port zero template:
			 * <protocol,caddr,0,vaddr,0,daddr,0>
			 */
			if (svc->fwmark) {
				protocol = IPPROTO_IP;
				vaddr = &fwmark;
			}
		}
		/* return *ignored = -1 so NF_DROP can be used */
		if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
						  vaddr, vport, &param) < 0) {
			*ignored = -1;
			return NULL;
		}
	}

	/* Check if a template already exists */
	ct = ip_vs_ct_in_get(&param);
	if (!ct || !ip_vs_check_template(ct)) {
		struct ip_vs_scheduler *sched;

		/*
		 * No template found or the dest of the connection
		 * template is not available.
		 * return *ignored=0 i.e. ICMP and NF_DROP
		 */
		sched = rcu_dereference(svc->scheduler);
		if (sched) {
			/* read svc->sched_data after svc->scheduler */
			smp_rmb();
			dest = sched->schedule(svc, skb, iph);
		} else {
			dest = NULL;
		}
		if (!dest) {
			IP_VS_DBG(1, "p-schedule: no dest found.\n");
			kfree(param.pe_data);
			*ignored = 0;
			return NULL;
		}

		if (dst_port == svc->port && svc->port != FTPPORT)
			dport = dest->port;

		/* Create a template
		 * This adds param.pe_data to the template,
		 * and thus param.pe_data will be destroyed
		 * when the template expires */
		ct = ip_vs_conn_new(&param, dest->af, &dest->addr, dport,
				    IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
		if (ct == NULL) {
			kfree(param.pe_data);
			*ignored = -1;
			return NULL;
		}

		ct->timeout = svc->timeout;
	} else {
		/* set destination with the found template */
		dest = ct->dest;
		kfree(param.pe_data);
	}

	dport = dst_port;
	if (dport == svc->port && dest->port)
		dport = dest->port;

	flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
		 && iph->protocol == IPPROTO_UDP) ?
		IP_VS_CONN_F_ONE_PACKET : 0;

	/*
	 *    Create a new connection according to the template
	 */
	ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol, &iph->saddr,
			      src_port, &iph->daddr, dst_port, &param);

	cp = ip_vs_conn_new(&param, dest->af, &dest->addr, dport, flags, dest,
			    skb->mark);
	if (cp == NULL) {
		ip_vs_conn_put(ct);
		*ignored = -1;
		return NULL;
	}

	/*
	 *    Add its control
	 */
	ip_vs_control_add(cp, ct);
	ip_vs_conn_put(ct);

	ip_vs_conn_stats(cp, svc);
	return cp;
}
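
The masking done at the top of ip_vs_sched_persist() ("snet.ip = iph->saddr.ip & svc->netmask") is what gives persistence its granularity: every client inside the same masked network keys the same template and therefore sticks to the same real server. A small sketch with illustrative addresses.

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	struct in_addr client1, client2, netmask, net1, net2;
	char buf[INET_ADDRSTRLEN];

	inet_pton(AF_INET, "192.168.5.77", &client1);
	inet_pton(AF_INET, "192.168.5.200", &client2);
	inet_pton(AF_INET, "255.255.255.0", &netmask);	/* persistence netmask */

	net1.s_addr = client1.s_addr & netmask.s_addr;	/* snet = saddr & netmask */
	net2.s_addr = client2.s_addr & netmask.s_addr;

	printf("client1 -> template key %s\n",
	       inet_ntop(AF_INET, &net1, buf, sizeof(buf)));
	printf("client2 shares the template: %s\n",
	       net1.s_addr == net2.s_addr ? "yes" : "no");
	return 0;
}
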


/*
 *  IPVS main scheduling function
 *  It selects a server according to the virtual service, and
 *  creates a connection entry.
 *  Protocols supported: TCP, UDP
 *
 *  Usage of *ignored
 *
 * 1 :   protocol tried to schedule (eg. on SYN), found svc but the
 *       svc/scheduler decides that this packet should be accepted with
 *       NF_ACCEPT because it must not be scheduled.
 *
 * 0 :   scheduler can not find destination, so try bypass or
 *       return ICMP and then NF_DROP (ip_vs_leave).
 *
 * -1 :  scheduler tried to schedule but fatal error occurred, eg.
 *       ip_vs_conn_new failure (ENOMEM) or ip_vs_sip_fill_param
 *       failure such as missing Call-ID, ENOMEM on skb_linearize
 *       or pe_data. In this case we should return NF_DROP without
 *       any attempts to send ICMP with ip_vs_leave.
 */
struct ip_vs_conn *
ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
	       struct ip_vs_proto_data *pd, int *ignored,
	       struct ip_vs_iphdr *iph)
{
	struct ip_vs_protocol *pp = pd->pp;
	struct ip_vs_conn *cp = NULL;
	struct ip_vs_scheduler *sched;
	struct ip_vs_dest *dest;
	__be16 _ports[2], *pptr;
	unsigned int flags;

	*ignored = 1;
	/*
	 * IPv6 frags, only the first hit here.
	 */
	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
	if (pptr == NULL)
		return NULL;

	/*
	 * FTPDATA needs this check when using local real server.
	 * Never schedule Active FTPDATA connections from real server.
	 * For LVS-NAT they must be already created. For other methods
	 * with persistence the connection is created on SYN+ACK.
	 */
	if (pptr[0] == FTPDATA) {
		IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
			      "Not scheduling FTPDATA");
		return NULL;
	}

	/*
	 *    Do not schedule replies from local real server.
	 */
	if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
	    (cp = pp->conn_in_get(svc->af, skb, iph, 1))) {
		IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
			      "Not scheduling reply for existing connection");
		__ip_vs_conn_put(cp);
		return NULL;
	}

	/*
	 *    Persistent service
	 */
	if (svc->flags & IP_VS_SVC_F_PERSISTENT)
		return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored,
					   iph);

	*ignored = 0;

	/*
	 *    Non-persistent service
	 */
	if (!svc->fwmark && pptr[1] != svc->port) {
		if (!svc->port)
			pr_err("Schedule: port zero only supported "
			       "in persistent services, "
			       "check your ipvs configuration\n");
		return NULL;
	}

	sched = rcu_dereference(svc->scheduler);
	if (sched) {
		/* read svc->sched_data after svc->scheduler */
		smp_rmb();
		dest = sched->schedule(svc, skb, iph);
	} else {
		dest = NULL;
	}
	if (dest == NULL) {
		IP_VS_DBG(1, "Schedule: no dest found.\n");
		return NULL;
	}

	flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
		 && iph->protocol == IPPROTO_UDP) ?
		IP_VS_CONN_F_ONE_PACKET : 0;

	/*
	 *    Create a connection entry.
	 */
	{
		struct ip_vs_conn_param p;

		ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol,
				      &iph->saddr, pptr[0], &iph->daddr,
				      pptr[1], &p);
		cp = ip_vs_conn_new(&p, dest->af, &dest->addr,
				    dest->port ? dest->port : pptr[1],
				    flags, dest, skb->mark);
		if (!cp) {
			*ignored = -1;
			return NULL;
		}
	}

	IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
		      "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
		      ip_vs_fwd_tag(cp),
		      IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
		      IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
		      IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
		      cp->flags, atomic_read(&cp->refcnt));

	ip_vs_conn_stats(cp, svc);
	return cp;
}
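
A hedged sketch of how a caller combines the cp/*ignored pair returned by ip_vs_schedule() above, following the "Usage of *ignored" comment; it is a simplified stand-in for the protocol modules' conn_schedule handlers, not a copy of them.

#include <stdio.h>

#define NF_DROP   0
#define NF_ACCEPT 1

struct conn;	/* opaque stand-in for struct ip_vs_conn */

static int schedule_verdict(struct conn *cp, int ignored, int leave_verdict)
{
	if (cp)
		return NF_ACCEPT;	/* scheduled: let the packet go on */
	if (ignored == -1)
		return NF_DROP;		/* fatal error, skip ip_vs_leave/ICMP */
	if (ignored == 0)
		return leave_verdict;	/* no dest: verdict from ip_vs_leave() */
	return NF_ACCEPT;		/* ignored == 1: must not be scheduled */
}

int main(void)
{
	printf("fatal error     -> %d\n", schedule_verdict(NULL, -1, NF_DROP));
	printf("no destination  -> %d\n", schedule_verdict(NULL, 0, NF_DROP));
	printf("not scheduled   -> %d\n", schedule_verdict(NULL, 1, NF_DROP));
	return 0;
}
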


/*
 *  Pass or drop the packet.
 *  Called by ip_vs_in, when the virtual service is available but
 *  no destination is available for a new connection.
 */
int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
		struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph)
{
	__be16 _ports[2], *pptr;
#ifdef CONFIG_SYSCTL
	struct net *net;
	struct netns_ipvs *ipvs;
	int unicast;
#endif

	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
	if (pptr == NULL) {
		return NF_DROP;
	}

#ifdef CONFIG_SYSCTL
	net = skb_net(skb);

#ifdef CONFIG_IP_VS_IPV6
	if (svc->af == AF_INET6)
		unicast = ipv6_addr_type(&iph->daddr.in6) & IPV6_ADDR_UNICAST;
	else
#endif
		unicast = (inet_addr_type(net, iph->daddr.ip) == RTN_UNICAST);

	/* if it is fwmark-based service, the cache_bypass sysctl is up
	   and the destination is a non-local unicast, then create
	   a cache_bypass connection entry */
	ipvs = net_ipvs(net);
	if (ipvs->sysctl_cache_bypass && svc->fwmark && unicast) {
		int ret;
		struct ip_vs_conn *cp;
		unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
				      iph->protocol == IPPROTO_UDP) ?
				      IP_VS_CONN_F_ONE_PACKET : 0;
		union nf_inet_addr daddr =  { .all = { 0, 0, 0, 0 } };

		/* create a new connection entry */
		IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
		{
			struct ip_vs_conn_param p;
			ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol,
					      &iph->saddr, pptr[0],
					      &iph->daddr, pptr[1], &p);
			cp = ip_vs_conn_new(&p, svc->af, &daddr, 0,
					    IP_VS_CONN_F_BYPASS | flags,
					    NULL, skb->mark);
			if (!cp)
				return NF_DROP;
		}

		/* statistics */
		ip_vs_in_stats(cp, skb);

		/* set state */
		ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);

		/* transmit the first SYN packet */
		ret = cp->packet_xmit(skb, cp, pd->pp, iph);
		/* do not touch skb anymore */

		atomic_inc(&cp->in_pkts);
		ip_vs_conn_put(cp);
		return ret;
	}