Example #1
static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < (int)sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case DCCP_SOCKOPT_GET_CUR_MPS:
		val = dp->dccps_mss_cache;
		len = sizeof(val);
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		val = dp->dccps_pcslen;
		len = sizeof(val);
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		val = dp->dccps_pcrlen;
		len = sizeof(val);
		break;
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
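A minimal userspace sketch of the corresponding query path may help put the handler in context. It is an illustration, not part of the kernel source: it assumes a Linux system with DCCP support and <linux/dccp.h> installed, and defines SOL_DCCP (269) as a fallback in case the libc headers lack it.

/* Hedged userspace sketch: query the current maximum packet size
 * (DCCP_SOCKOPT_GET_CUR_MPS) of a connected DCCP socket. This call
 * ends up in do_dccp_getsockopt() above; error handling is minimal. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/dccp.h>

#ifndef SOL_DCCP
#define SOL_DCCP 269	/* value from include/linux/socket.h */
#endif

int query_cur_mps(int fd)
{
	int mps = 0;
	socklen_t len = sizeof(mps);

	if (getsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_GET_CUR_MPS,
		       &mps, &len) < 0) {
		perror("getsockopt(DCCP_SOCKOPT_GET_CUR_MPS)");
		return -1;
	}
	printf("current MPS: %d bytes\n", mps);
	return mps;
}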
Example #2
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static inline void dccp_do_pmtu_discovery(struct sock *sk,
					  const struct iphdr *iph,
					  u32 mtu)
{
	struct dst_entry *dst;
	const struct inet_sock *inet = inet_sk(sk);
	const struct dccp_sock *dp = dccp_sk(sk);

	/* We are not interested in DCCP_LISTEN and request socks (RESPONSEs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == DCCP_LISTEN)
		return;

	/* We don't check in the dst entry if PMTU discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when PMTU discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to go wrong... Remember the soft error
	 * for the case that this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		dccp_sync_mss(sk, mtu);

		/*
		 * From RFC 4340, sec. 14.1:
		 *
		 *	DCCP-Sync packets are the best choice for upward
		 *	probing, since DCCP-Sync probes do not risk application
		 *	data loss.
		 */
		dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
	} /* else let the usual retransmit timer handle it */
}
Example #3
static int dccp_hdlr_ackvec(struct sock *sk, u64 enable, bool rx)
{
	struct dccp_sock *dp = dccp_sk(sk);

	if (rx) {
		if (enable && dp->dccps_hc_rx_ackvec == NULL) {
			dp->dccps_hc_rx_ackvec = dccp_ackvec_alloc(gfp_any());
			if (dp->dccps_hc_rx_ackvec == NULL)
				return -ENOMEM;
		} else if (!enable) {
			dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
			dp->dccps_hc_rx_ackvec = NULL;
		}
	}
	return 0;
}
Example #4
/*
 * Do all connect socket setups that can be done AF-independently.
 */
static inline void dccp_connect_init(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);

	dccp_sync_mss(sk, dst_mtu(dst));

	/* Initialise GAR as per 8.5; AWL/AWH are set in dccp_transmit_skb() */
	dp->dccps_gar = dp->dccps_iss;

	icsk->icsk_retransmits = 0;
}
Example #5
File: output.c Project: 274914765/C
void dccp_write_xmit(struct sock *sk, int block)
{
    struct dccp_sock *dp = dccp_sk(sk);
    struct sk_buff *skb;

    while ((skb = skb_peek(&sk->sk_write_queue))) {
        int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

        if (err > 0) {
            if (!block) {
                sk_reset_timer(sk, &dp->dccps_xmit_timer,
                        msecs_to_jiffies(err)+jiffies);
                break;
            } else
                err = dccp_wait_for_ccid(sk, skb, err);
            if (err && err != -EINTR)
                DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
        }

        skb_dequeue(&sk->sk_write_queue);
        if (err == 0) {
            struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
            const int len = skb->len;

            if (sk->sk_state == DCCP_PARTOPEN) {
                /* See 8.1.5.  Handshake Completion */
                inet_csk_schedule_ack(sk);
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                          inet_csk(sk)->icsk_rto,
                          DCCP_RTO_MAX);
                dcb->dccpd_type = DCCP_PKT_DATAACK;
            } else if (dccp_ack_pending(sk))
                dcb->dccpd_type = DCCP_PKT_DATAACK;
            else
                dcb->dccpd_type = DCCP_PKT_DATA;

            err = dccp_transmit_skb(sk, skb);
            ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
            if (err)
                DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
                     err);
        } else {
            dccp_pr_debug("packet discarded due to err=%d\n", err);
            kfree_skb(skb);
        }
    }
}
Example #6
/*
 * The three way handshake has completed - we got a valid ACK or DATAACK -
 * now create the new socket.
 *
 * This is the equivalent of TCP's tcp_v4_syn_recv_sock
 */
struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
				       struct request_sock *req,
				       struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct dccp_sock *newdp;
	struct sock *newsk;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto exit;

	sk_setup_caps(newsk, dst);

	newdp		   = dccp_sk(newsk);
	newinet		   = inet_sk(newsk);
	ireq		   = inet_rsk(req);
	newinet->daddr	   = ireq->rmt_addr;
	newinet->rcv_saddr = ireq->loc_addr;
	newinet->saddr	   = ireq->loc_addr;
	newinet->opt	   = ireq->opt;
	ireq->opt	   = NULL;
	newinet->mc_index  = inet_iif(skb);
	newinet->mc_ttl	   = skb->nh.iph->ttl;
	newinet->id	   = jiffies;

	dccp_sync_mss(newsk, dst_mtu(dst));

	__inet_hash_nolisten(newsk);
	__inet_inherit_port(sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}
Example #7
/**
 * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
 * @sk:    socket to wait for
 * @skb:   current skb to pass on for waiting
 * @timeo: for how long (in jiffies)
 */
static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
			      long *timeo)
{
	struct dccp_sock *dp = dccp_sk(sk);
	DEFINE_WAIT(wait);
	long delay;
	int rc;

	while (1) {
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
			goto do_error;
		if (!*timeo)
			goto do_nonblock;
		if (signal_pending(current))
			goto do_interrupted;

		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
					    skb->len);
		if (rc <= 0)
			break;
		delay = msecs_to_jiffies(rc);
		if (delay > *timeo || delay < 0)
			goto do_nonblock;

		sk->sk_write_pending++;
		release_sock(sk);
		*timeo -= schedule_timeout(delay);
		lock_sock(sk);
		sk->sk_write_pending--;
	}
out:
	finish_wait(sk->sk_sleep, &wait);
	return rc;

do_error:
	rc = -EPIPE;
	goto out;
do_nonblock:
	rc = -EAGAIN;
	goto out;
do_interrupted:
	rc = sock_intr_errno(*timeo);
	goto out;
}
Example #8
static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
				       const enum dccp_reset_codes code)
{
	struct dccp_hdr *dh;
	struct dccp_sock *dp = dccp_sk(sk);
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_reset);
	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	skb->dst = dst_clone(dst);

	dccp_inc_seqno(&dp->dccps_gss);

	DCCP_SKB_CB(skb)->dccpd_reset_code = code;
	DCCP_SKB_CB(skb)->dccpd_type	   = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_seq	   = dp->dccps_gss;

	if (dccp_insert_options(sk, skb)) {
		kfree_skb(skb);
		return NULL;
	}

	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport	= inet_sk(sk)->sport;
	dh->dccph_dport	= inet_sk(sk)->dport;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESET;
	dh->dccph_x	= 1;
	dccp_hdr_set_seq(dh, dp->dccps_gss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);

	dccp_hdr_reset(skb)->dccph_reset_code = code;
	inet_csk(sk)->icsk_af_ops->send_check(sk, 0, skb);

	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}
Example #9
/*
 * Feature activation handlers.
 *
 * These all use a u64 argument, to provide enough room for NN/SP features. At
 * this stage the negotiated values have been checked to be within their range.
 */
static int dccp_hdlr_ccid(struct sock *sk, u64 ccid, bool rx)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid *new_ccid = ccid_new(ccid, sk, rx);

	if (new_ccid == NULL)
		return -ENOMEM;

	if (rx) {
		ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
		dp->dccps_hc_rx_ccid = new_ccid;
	} else {
		ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
		dp->dccps_hc_tx_ccid = new_ccid;
	}
	return 0;
}
Example #10
static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
{
	/*
	 *   Step 7: Check for unexpected packet types
	 *      If (S.is_server and P.type == CloseReq)
	 *	  Send Sync packet acknowledging P.seqno
	 *	  Drop packet and return
	 */
	if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
		return;
	}

	if (sk->sk_state != DCCP_CLOSING)
		dccp_set_state(sk, DCCP_CLOSING);
	dccp_send_close(sk, 0);
}
Example #11
static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
{
	int queued = 0;

	switch (sk->sk_state) {
	/*
	 * We ignore Close when received in one of the following states:
	 *  - CLOSED		(may be a late or duplicate packet)
	 *  - PASSIVE_CLOSEREQ	(the peer has sent a CloseReq earlier)
	 *  - RESPOND		(already handled by dccp_check_req)
	 */
	case DCCP_CLOSING:
		/*
		 * Simultaneous-close: receiving a Close after sending one. This
		 * can happen if both client and server perform active-close and
		 * will result in an endless ping-pong of crossing and retrans-
		 * mitted Close packets, which only terminates when one of the
		 * nodes times out (min. 64 seconds). Quicker convergence can be
		 * achieved when one of the nodes acts as tie-breaker.
		 * This is ok as both ends are done with data transfer and each
		 * end is just waiting for the other to acknowledge termination.
		 */
		if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT)
			break;
		/* fall through */
	case DCCP_REQUESTING:
	case DCCP_ACTIVE_CLOSEREQ:
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_done(sk);
		break;
	case DCCP_OPEN:
	case DCCP_PARTOPEN:
		/* Give waiting application a chance to read pending data */
		queued = 1;
		dccp_fin(sk, skb);
		dccp_set_state(sk, DCCP_PASSIVE_CLOSE);
		/* fall through */
	case DCCP_PASSIVE_CLOSE:
		/*
		 * Retransmitted Close: we have already enqueued the first one.
		 */
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	}
	return queued;
}
Example #12
static void dccp_get_info(struct sock *sk, struct tcp_info *info)
{
	struct dccp_sock *dp = dccp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	memset(info, 0, sizeof(*info));

	info->tcpi_state	= sk->sk_state;
	info->tcpi_retransmits	= icsk->icsk_retransmits;
	info->tcpi_probes	= icsk->icsk_probes_out;
	info->tcpi_backoff	= icsk->icsk_backoff;
	info->tcpi_pmtu		= icsk->icsk_pmtu_cookie;

	if (dccp_msk(sk)->dccpms_send_ack_vector)
		info->tcpi_options |= TCPI_OPT_SACK;

	ccid_hc_rx_get_info(dp->dccps_hc_rx_ccid, sk, info);
	ccid_hc_tx_get_info(dp->dccps_hc_tx_ccid, sk, info);
}
Example #13
int dccp_insert_options(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_sock *dp = dccp_sk(sk);

	DCCP_SKB_CB(skb)->dccpd_opt_len = 0;

	if (dp->dccps_send_ndp_count && dccp_insert_option_ndp(sk, skb))
		return -1;

	if (DCCP_SKB_CB(skb)->dccpd_type != DCCP_PKT_DATA) {

		/* Feature Negotiation */
		if (dccp_feat_insert_opts(dp, NULL, skb))
			return -1;

		if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_REQUEST) {
			/*
			 * Obtain RTT sample from Request/Response exchange.
			 * This is currently used for TFRC initialisation.
			 */
			if (dccp_insert_option_timestamp(skb))
				return -1;

		} else if (dp->dccps_hc_rx_ackvec != NULL &&
			   dccp_ackvec_pending(dp->dccps_hc_rx_ackvec) &&
			   dccp_insert_option_ackvec(sk, skb)) {
				return -1;
		}
	}

	if (dp->dccps_hc_rx_insert_options) {
		if (ccid_hc_rx_insert_options(dp->dccps_hc_rx_ccid, sk, skb))
			return -1;
		dp->dccps_hc_rx_insert_options = 0;
	}

	if (dp->dccps_timestamp_echo != 0 &&
	    dccp_insert_option_timestamp_echo(dp, NULL, skb))
		return -1;

	dccp_insert_option_padding(skb);
	return 0;
}
Example #14
static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
						   struct sk_buff *skb,
						   const struct dccp_hdr *dh,
						   const unsigned len)
{
	int queued = 0;

	switch (dh->dccph_type) {
	case DCCP_PKT_RESET:
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		break;
	case DCCP_PKT_DATA:
		if (sk->sk_state == DCCP_RESPOND)
			break;
		/* fall through */
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_ACK:
		/*
		 * FIXME: we should be resetting the PARTOPEN (DELACK) timer
		 * here, but only if we haven't used the DELACK timer for
		 * something else, like sending a delayed ack for a TIMESTAMP
		 * echo, etc. For now we're not clearing it, since sending an
		 * extra ACK when there is nothing else to do in DELACK is
		 * not a big deal after all.
		 */

		/* Stop the PARTOPEN timer */
		if (sk->sk_state == DCCP_PARTOPEN)
			inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);

		dccp_sk(sk)->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
		dccp_set_state(sk, DCCP_OPEN);

		if (dh->dccph_type == DCCP_PKT_DATAACK ||
		    dh->dccph_type == DCCP_PKT_DATA) {
			__dccp_rcv_established(sk, skb, dh, len);
			queued = 1; /* packet was queued
				       (by __dccp_rcv_established) */
		}
		break;
	}

	return queued;
}
Example #15
File: ipv4.c Project: panyfx/ath
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static inline void dccp_do_pmtu_discovery(struct sock *sk,
					  const struct iphdr *iph,
					  u32 mtu)
{
	struct dst_entry *dst;
	const struct inet_sock *inet = inet_sk(sk);
	const struct dccp_sock *dp = dccp_sk(sk);

	/* We are not interested in DCCP_LISTEN and request socks (RESPONSEs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == DCCP_LISTEN)
		return;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to go wrong... Remember the soft error
	 * for the case that this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		dccp_sync_mss(sk, mtu);

		/*
		 * From RFC 4340, sec. 14.1:
		 *
		 *	DCCP-Sync packets are the best choice for upward
		 *	probing, since DCCP-Sync probes do not risk application
		 *	data loss.
		 */
		dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
	} /* else let the usual retransmit timer handle it */
}
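The inet->pmtudisc test above is controlled by the generic IP_MTU_DISCOVER socket option rather than anything DCCP-specific. As a hedged illustration, an application could opt a DCCP socket into or out of PMTU discovery like this (standard <netinet/in.h> constants; the helper name is ours):

/* Sketch: toggle path MTU discovery on a (DCCP) socket.
 * IP_PMTUDISC_DONT makes the inet->pmtudisc check in
 * dccp_do_pmtu_discovery() skip dccp_sync_mss(). */
#include <sys/socket.h>
#include <netinet/in.h>

int set_pmtu_discovery(int fd, int enable)
{
	int val = enable ? IP_PMTUDISC_DO : IP_PMTUDISC_DONT;

	return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER,
			  &val, sizeof(val));
}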
Example #16
void dccp_insert_option_elapsed_time(struct sock *sk,
				     struct sk_buff *skb,
				     u32 elapsed_time)
{
#ifdef CONFIG_IP_DCCP_DEBUG
	struct dccp_sock *dp = dccp_sk(sk);
	const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ?
					"CLIENT TX opt: " : "server TX opt: ";
#endif
	const int elapsed_time_len = dccp_elapsed_time_len(elapsed_time);
	const int len = 2 + elapsed_time_len;
	unsigned char *to;

	if (elapsed_time_len == 0)
		return;

	if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) {
		LIMIT_NETDEBUG(KERN_INFO "DCCP: packet too small to "
					 "insert elapsed time!\n");
		return;
	}

	DCCP_SKB_CB(skb)->dccpd_opt_len += len;

	to    = skb_push(skb, len);
	*to++ = DCCPO_ELAPSED_TIME;
	*to++ = len;

	if (elapsed_time_len == 2) {
		const u16 var16 = htons((u16)elapsed_time);
		memcpy(to, &var16, 2);
	} else {
		const u32 var32 = htonl(elapsed_time);
		memcpy(to, &var32, 4);
	}

	dccp_pr_debug("%sELAPSED_TIME=%u, len=%d, seqno=%llu\n",
		      debug_prefix, elapsed_time,
		      len,
		      (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq);
}
Example #17
int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	if (get_user(len, optlen))
		return -EFAULT;

	if (len < (int)sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		val = dp->dccps_packet_size;
		len = sizeof(dp->dccps_packet_size);
		break;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (u32 __user *)optval, optlen);
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
Example #18
unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	u32 ccmps = dccp_determine_ccmps(dp);
	u32 cur_mps = ccmps ? min(pmtu, ccmps) : pmtu;

	/* Account for header lengths and IPv4/v6 options */
	cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +
		    sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext));

	/* Leave headroom for common DCCP header options (cf. RFC 4340, 5.8) */
	cur_mps -= roundup(1 + 6 + 10 + dp->dccps_send_ndp_count * 8 + 6 +
			   (dp->dccps_hc_rx_ackvec ? DCCPAV_MIN_OPTLEN : 0), 4);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = cur_mps;

	return cur_mps;
}
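A worked example of the computation above, under assumed values (plain IPv4 with no IP options, NDP counts off, no Ack Vector): the fixed headers cost 20 + 12 + 4 = 36 bytes and the option headroom rounds 1 + 6 + 10 + 6 = 23 up to 24, so a 1500-byte PMTU yields an MPS of 1440.

/* Illustrative recomputation of the arithmetic in dccp_sync_mss()
 * for one assumed configuration; not kernel code. */
#include <stdio.h>

#define ROUNDUP4(x)	(((x) + 3) & ~3u)

int main(void)
{
	unsigned int pmtu = 1500;
	unsigned int headers = 20 /* IPv4 */ + 12 /* dccp_hdr */
			     + 4 /* dccp_hdr_ext */;
	unsigned int opt_room = ROUNDUP4(1 + 6 + 10 + 6);	/* 24 */

	printf("MPS = %u\n", pmtu - headers - opt_room);	/* 1440 */
	return 0;
}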
Example #19
int dccp_insert_options(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_sock *dp = dccp_sk(sk);

	DCCP_SKB_CB(skb)->dccpd_opt_len = 0;

	if (dp->dccps_send_ndp_count && dccp_insert_option_ndp(sk, skb))
		return -1;

	if (DCCP_SKB_CB(skb)->dccpd_type != DCCP_PKT_DATA) {

		/* Feature Negotiation */
		if (dccp_feat_insert_opts(dp, NULL, skb))
			return -1;

		if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_REQUEST) {
			/*
			 * Obtain RTT sample from Request/Response exchange.
			 * This is currently used for TFRC initialisation.
			 */
			if (dccp_insert_option_timestamp(sk, skb))
				return -1;

		} else if (dp->dccps_hc_rx_ackvec != NULL &&
			   dccp_ackvec_pending(dp->dccps_hc_rx_ackvec) &&
			   dccp_insert_option_ackvec(sk, skb)) {
				return -1;
		}
	}

	if (dp->dccps_hc_rx_insert_options) {
		if (ccid_hc_rx_insert_options(dp->dccps_hc_rx_ccid, sk, skb))
			return -1;
		dp->dccps_hc_rx_insert_options = 0;
	}

	if (dp->dccps_timestamp_echo != 0 &&
	    dccp_insert_option_timestamp_echo(dp, NULL, skb))
		return -1;

	dccp_insert_option_padding(skb);
	return 0;
}
Example #20
/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

	skb = alloc_skb(sk->sk_prot->max_header, prio);
	if (skb == NULL)
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	skb->csum = 0;
	DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
					DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;

	if (active) {
		dccp_skb_entail(sk, skb);
		dccp_transmit_skb(sk, skb_clone(skb, prio));
	} else
		dccp_transmit_skb(sk, skb);
}
Example #21
void dccp_insert_options(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_sock *dp = dccp_sk(sk);

	DCCP_SKB_CB(skb)->dccpd_opt_len = 0;

	if (dp->dccps_options.dccpo_send_ndp_count)
		dccp_insert_option_ndp(sk, skb);

	if (!dccp_packet_without_ack(skb)) {
		if (dp->dccps_options.dccpo_send_ack_vector &&
		    dccp_ackvec_pending(dp->dccps_hc_rx_ackvec))
			dccp_insert_option_ackvec(sk, skb);
		if (dp->dccps_timestamp_echo != 0)
			dccp_insert_option_timestamp_echo(sk, skb);
	}

	if (dp->dccps_hc_rx_insert_options) {
		ccid_hc_rx_insert_options(dp->dccps_hc_rx_ccid, sk, skb);
		dp->dccps_hc_rx_insert_options = 0;
	}
	if (dp->dccps_hc_tx_insert_options) {
		ccid_hc_tx_insert_options(dp->dccps_hc_tx_ccid, sk, skb);
		dp->dccps_hc_tx_insert_options = 0;
	}

	/* XXX: insert other options when appropriate */

	if (DCCP_SKB_CB(skb)->dccpd_opt_len != 0) {
		/* The length of all options has to be a multiple of 4 */
		int padding = DCCP_SKB_CB(skb)->dccpd_opt_len % 4;

		if (padding != 0) {
			padding = 4 - padding;
			memset(skb_push(skb, padding), 0, padding);
			DCCP_SKB_CB(skb)->dccpd_opt_len += padding;
		}
	}
}
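The final block pads the options area to a multiple of 4 bytes, as RFC 4340, 5.8 requires. The same arithmetic as a standalone sketch (the helper name is illustrative):

/* Number of zero padding bytes needed to round an options length
 * up to the next multiple of 4, mirroring the block above. */
static unsigned int dccp_opt_padding(unsigned int opt_len)
{
	unsigned int padding = opt_len % 4;

	return padding ? 4 - padding : 0;	/* 0..3 bytes */
}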
Example #22
/**
 * dccp_flush_write_queue  -  Drain queue at end of connection
 * Since dccp_sendmsg queues packets without waiting for them to be sent, it may
 * happen that the TX queue is not empty at the end of a connection. We give the
 * HC-sender CCID a grace period of up to @time_budget jiffies. If this function
 * returns with a non-empty write queue, it will be purged later.
 */
void dccp_flush_write_queue(struct sock *sk, long *time_budget)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	long delay, rc;

	while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
			/*
			 * If the CCID determines when to send, the next sending
			 * time is unknown or the CCID may not even send again
			 * (e.g. remote host crashes or lost Ack packets).
			 */
			DCCP_WARN("CCID did not manage to send all packets\n");
			return;
		case CCID_PACKET_DELAY:
			delay = msecs_to_jiffies(rc);
			if (delay > *time_budget)
				return;
			rc = dccp_wait_for_ccid(sk, delay);
			if (rc < 0)
				return;
			*time_budget -= (delay - rc);
			/* check again if we can send now */
			break;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			skb_dequeue(&sk->sk_write_queue);
			kfree_skb(skb);
			dccp_pr_debug("packet discarded due to err=%ld\n", rc);
		}
	}
}
Example #23
int dccp_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int optlen)
{
	struct dccp_sock *dp;
	int err;
	int val;

	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
							     optname, optval,
							     optlen);

	if (optlen < (int)sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	dp = dccp_sk(sk);
	err = 0;

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		dp->dccps_packet_size = val;
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}
	
	release_sock(sk);
	return err;
}
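For the DCCP_SOCKOPT_SERVICE branch above, a minimal userspace sketch of the client side. The service code 42 is an arbitrary example value, and setting it before connect() follows common DCCP usage; SOL_DCCP is defined as a fallback in case the libc headers lack it.

/* Hedged sketch: create a DCCP socket and set its service code,
 * which dccp_setsockopt() routes to dccp_setsockopt_service(). */
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>
#include <linux/dccp.h>

#ifndef SOL_DCCP
#define SOL_DCCP 269
#endif

int dccp_client_socket(void)
{
	int service = 42;	/* arbitrary example service code */
	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);

	if (fd < 0)
		return -1;
	if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
		       &service, sizeof(service)) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* ready for connect() */
}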
Example #24
File: proto.c Project: 7799/linux
void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p)  %s  -->  %s\n", dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);

	switch (state) {
	case DCCP_OPEN:
		if (oldstate != DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
		/* Client retransmits all Confirm options until entering OPEN */
		if (oldstate == DCCP_PARTOPEN)
			dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
		break;

	case DCCP_CLOSED:
		if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
		    oldstate == DCCP_CLOSING)
			DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash != NULL &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		/* fall through */
	default:
		if (oldstate == DCCP_OPEN)
			DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;
}
Example #25
File: output.c Project: 274914765/C
/**
 * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
 * @sk:    socket to wait for
 * @skb:   current skb to pass on for waiting
 * @delay: sleep timeout in milliseconds (> 0)
 * This function is called by default when the socket is closed, and
 * when a non-zero linger time is set on the socket.
 */
static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay)
{
    struct dccp_sock *dp = dccp_sk(sk);
    DEFINE_WAIT(wait);
    unsigned long jiffdelay;
    int rc;

    do {
        dccp_pr_debug("delayed send by %d msec\n", delay);
        jiffdelay = msecs_to_jiffies(delay);

        prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

        sk->sk_write_pending++;
        release_sock(sk);
        schedule_timeout(jiffdelay);
        lock_sock(sk);
        sk->sk_write_pending--;

        if (sk->sk_err)
            goto do_error;
        if (signal_pending(current))
            goto do_interrupted;

        rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
    } while ((delay = rc) > 0);
out:
    finish_wait(sk->sk_sleep, &wait);
    return rc;

do_error:
    rc = -EPIPE;
    goto out;
do_interrupted:
    rc = -EINTR;
    goto out;
}
Example #26
/*
 * Do all connect socket setups that can be done AF-independently.
 */
static inline void dccp_connect_init(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);
	
	dccp_sync_mss(sk, dst_mtu(dst));

	dccp_update_gss(sk, dp->dccps_iss);
 	/*
	 * SWL and AWL are initially adjusted so that they are not less than
	 * the initial Sequence Numbers received and sent, respectively:
	 *	SWL := max(GSR + 1 - floor(W/4), ISR),
	 *	AWL := max(GSS - W' + 1, ISS).
	 * These adjustments MUST be applied only at the beginning of the
	 * connection.
 	 */
	dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss));

	icsk->icsk_retransmits = 0;
}
Example #27
File: proto.c Project: 7799/linux
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_rto		= DCCP_TIMEOUT_INIT;
	icsk->icsk_syn_retries	= sysctl_dccp_request_retries;
	sk->sk_state		= DCCP_CLOSED;
	sk->sk_write_space	= dccp_write_space;
	icsk->icsk_sync_mss	= dccp_sync_mss;
	dp->dccps_mss_cache	= 536;
	dp->dccps_rate_last	= jiffies;
	dp->dccps_role		= DCCP_ROLE_UNDEFINED;
	dp->dccps_service	= DCCP_SERVICE_CODE_IS_ABSENT;
	dp->dccps_tx_qlen	= sysctl_dccp_tx_qlen;

	dccp_init_xmit_timers(sk);

	INIT_LIST_HEAD(&dp->dccps_featneg);
	/* control socket doesn't need feat nego */
	if (likely(ctl_sock_initialized))
		return dccp_feat_init(sk);
	return 0;
}
Example #28
File: output.c Project: 274914765/C
/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
    struct dccp_sock *dp = dccp_sk(sk);
    struct sk_buff *skb;
    const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

    skb = alloc_skb(sk->sk_prot->max_header, prio);
    if (skb == NULL)
        return;

    /* Reserve space for headers and prepare control bits. */
    skb_reserve(skb, sk->sk_prot->max_header);
    if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait)
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
    else
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;

    if (active) {
        dccp_write_xmit(sk, 1);
        dccp_skb_entail(sk, skb);
        dccp_transmit_skb(sk, skb_clone(skb, prio));
        /*
         * Retransmission timer for active-close: RFC 4340, 8.3 requires
         * to retransmit the Close/CloseReq until the CLOSING/CLOSEREQ
         * state can be left. The initial timeout is 2 RTTs.
         * Since RTT measurement is done by the CCIDs, there is no easy
         * way to get an RTT sample. The fallback RTT from RFC 4340, 3.4
         * is too low (200ms); we use a high value to avoid unnecessary
         * retransmissions when the link RTT is > 0.2 seconds.
         * FIXME: Let main module sample RTTs and use that instead.
         */
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                      DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
    } else
        dccp_transmit_skb(sk, skb);
}
Example #29
void dccp_write_xmit(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	while ((skb = dccp_qpolicy_top(sk))) {
		int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
			return;
		case CCID_PACKET_DELAY:
			sk_reset_timer(sk, &dp->dccps_xmit_timer,
				       jiffies + msecs_to_jiffies(rc));
			return;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			dccp_qpolicy_drop(sk, skb);
			dccp_pr_debug("packet discarded due to err=%d\n", rc);
		}
	}
}
Example #30
static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
{
	struct dccp_sock *dp = dccp_sk(sk);
	u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->ccid2hctx_cwnd, 2);

	/*
	 * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from
	 * RFC 4341, 6.1.2. We ignore the statement that Ack Ratio 2 is always
	 * acceptable since this causes starvation/deadlock whenever cwnd < 2.
	 * The same problem arises when Ack Ratio is 0 (i.e. Ack Ratio disabled).
	 */
	if (val == 0 || val > max_ratio) {
		DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);
		val = max_ratio;
	}
	if (val > DCCPF_ACK_RATIO_MAX)
		val = DCCPF_ACK_RATIO_MAX;

	if (val == dp->dccps_l_ack_ratio)
		return;

	ccid2_pr_debug("changing local ack ratio to %u\n", val);
	dp->dccps_l_ack_ratio = val;
}
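The clamp above reduces to two bounds: Ack Ratio must be non-zero and may not exceed min(ceil(cwnd/2), DCCPF_ACK_RATIO_MAX). A standalone restatement, with the 2-byte feature maximum (0xFFFF) assumed for DCCPF_ACK_RATIO_MAX:

/* Sketch of the Ack Ratio bounds applied in
 * ccid2_change_l_ack_ratio(); constants are assumptions. */
#define ACK_RATIO_MAX		0xFFFF	/* 2-byte feature value */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int clamp_ack_ratio(unsigned int val, unsigned int cwnd)
{
	unsigned int max_ratio = DIV_ROUND_UP(cwnd, 2);

	if (val == 0 || val > max_ratio)
		val = max_ratio;	/* avoid starvation when cwnd < 2 */
	if (val > ACK_RATIO_MAX)
		val = ACK_RATIO_MAX;
	return val;
}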