Example no. 1
void dccp_write_xmit(struct sock *sk, int block)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	while ((skb = skb_peek(&sk->sk_write_queue))) {
		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		if (err > 0) {
			if (!block) {
				sk_reset_timer(sk, &dp->dccps_xmit_timer,
						msecs_to_jiffies(err)+jiffies);
				break;
			} else
				err = dccp_wait_for_ccid(sk, skb, err);
			if (err && err != -EINTR)
				DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
		}

		skb_dequeue(&sk->sk_write_queue);
		if (err == 0) {
			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
			const int len = skb->len;

			if (sk->sk_state == DCCP_PARTOPEN) {
				const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
				/*
				 * See 8.1.5 - Handshake Completion.
				 *
				 * For robustness we resend Confirm options until the client has
				 * entered OPEN. During the initial feature negotiation, the MPS
				 * is smaller than usual, reduced by the Change/Confirm options.
				 */
				if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
					DCCP_WARN("Payload too large (%d) for featneg.\n", len);
					dccp_send_ack(sk);
					dccp_feat_list_purge(&dp->dccps_featneg);
				}

				inet_csk_schedule_ack(sk);
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  inet_csk(sk)->icsk_rto,
						  DCCP_RTO_MAX);
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			} else if (dccp_ack_pending(sk))
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			else
				dcb->dccpd_type = DCCP_PKT_DATA;

			err = dccp_transmit_skb(sk, skb);
			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
			if (err)
				DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
					 err);
		} else {
			dccp_pr_debug("packet discarded due to err=%d\n", err);
			kfree_skb(skb);
		}
	}
}
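A positive return from ccid_hc_tx_send_packet() is a delay in milliseconds, which is why the non-blocking path above arms dccps_xmit_timer at jiffies + msecs_to_jiffies(err) and returns; the retry then happens from timer context. Below is a sketch of that handler, reconstructed from net/dccp/timer.c of the same era (the handler name and the exact locking details are from memory and may differ between trees):

/* Transmit-delay timer: fires once the CCID-requested delay has elapsed
 * and retries the write queue without blocking. */
static void dccp_write_xmit_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct dccp_sock *dp = dccp_sk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		/* a user context holds the socket: retry in one jiffy */
		sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies + 1);
	else
		dccp_write_xmit(sk, 0);
	bh_unlock_sock(sk);
	sock_put(sk);	/* matches the reference taken by sk_reset_timer() */
}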
Example no. 2
int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, long *timeo)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
					 skb->len);

	if (err > 0)
		err = dccp_wait_for_ccid(sk, skb, timeo);

	if (err == 0) {
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		const int len = skb->len;

		if (sk->sk_state == DCCP_PARTOPEN) {
			/* See 8.1.5.  Handshake Completion */
			inet_csk_schedule_ack(sk);
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  inet_csk(sk)->icsk_rto,
						  DCCP_RTO_MAX);
			dcb->dccpd_type = DCCP_PKT_DATAACK;
		} else if (dccp_ack_pending(sk))
			dcb->dccpd_type = DCCP_PKT_DATAACK;
		else
			dcb->dccpd_type = DCCP_PKT_DATA;

		err = dccp_transmit_skb(sk, skb);
		ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
	} else
		kfree_skb(skb);

	return err;
}
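This older variant sends a single skb synchronously instead of draining sk_write_queue, with the caller's send-timeout budget passed down by reference. A minimal caller sketch follows; dccp_send_one is a hypothetical name invented for illustration, and only sock_sndtimeo() and the call signature above are taken as given:

static int dccp_send_one(struct sock *sk, struct sk_buff *skb, int noblock)
{
	/* jiffies budget for a blocking send, 0 if non-blocking */
	long timeo = sock_sndtimeo(sk, noblock);

	/* consumes skb on success and failure alike; dccp_wait_for_ccid()
	 * decrements timeo by however long the CCID made us wait */
	return dccp_write_xmit(sk, skb, &timeo);
}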
Example no. 3
void dccp_write_xmit(struct sock *sk, int block)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	while ((skb = skb_peek(&sk->sk_write_queue))) {
		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		if (err > 0) {
			if (!block) {
				sk_reset_timer(sk, &dp->dccps_xmit_timer,
						msecs_to_jiffies(err)+jiffies);
				break;
			} else
				err = dccp_wait_for_ccid(sk, skb, err);
			if (err && err != -EINTR)
				DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
		}

		skb_dequeue(&sk->sk_write_queue);
		if (err == 0) {
			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
			const int len = skb->len;

			if (sk->sk_state == DCCP_PARTOPEN) {
				const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
				
				if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
					DCCP_WARN("Payload too large (%d) for featneg.\n", len);
					dccp_send_ack(sk);
					dccp_feat_list_purge(&dp->dccps_featneg);
				}

				inet_csk_schedule_ack(sk);
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  inet_csk(sk)->icsk_rto,
						  DCCP_RTO_MAX);
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			} else if (dccp_ack_pending(sk))
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			else
				dcb->dccpd_type = DCCP_PKT_DATA;

			err = dccp_transmit_skb(sk, skb);
			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
			if (err)
				DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
					 err);
		} else {
			dccp_pr_debug("packet discarded due to err=%d\n", err);
			kfree_skb(skb);
		}
	}
}
Example no. 4
void dccp_write_xmit(struct sock *sk, int block)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	long timeo = 30000; 	/* generous grace period: if a packet takes
				   this long to send, we have other issues */

	while ((skb = skb_peek(&sk->sk_write_queue))) {
		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
						 skb->len);

		if (err > 0) {
			if (!block) {
				sk_reset_timer(sk, &dp->dccps_xmit_timer,
						msecs_to_jiffies(err)+jiffies);
				break;
			} else
				err = dccp_wait_for_ccid(sk, skb, &timeo);
			if (err) {
				printk(KERN_CRIT "%s:err at dccp_wait_for_ccid"
						 " %d\n", __FUNCTION__, err);
				dump_stack();
			}
		}

		skb_dequeue(&sk->sk_write_queue);
		if (err == 0) {
			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
			const int len = skb->len;

			if (sk->sk_state == DCCP_PARTOPEN) {
				/* See 8.1.5.  Handshake Completion */
				inet_csk_schedule_ack(sk);
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  inet_csk(sk)->icsk_rto,
						  DCCP_RTO_MAX);
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			} else if (dccp_ack_pending(sk))
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			else
				dcb->dccpd_type = DCCP_PKT_DATA;

			err = dccp_transmit_skb(sk, skb);
			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
			if (err) {
				printk(KERN_CRIT "%s:err from "
					         "ccid_hc_tx_packet_sent %d\n",
					         __FUNCTION__, err);
				dump_stack();
			}
		} else
			kfree_skb(skb);	/* skbs must be freed with kfree_skb(), not kfree() */
	}
}
Example no. 5
File: output.c Project: 274914765/C
void dccp_write_xmit(struct sock *sk, int block)
{
    struct dccp_sock *dp = dccp_sk(sk);
    struct sk_buff *skb;

    while ((skb = skb_peek(&sk->sk_write_queue))) {
        int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

        if (err > 0) {
            if (!block) {
                sk_reset_timer(sk, &dp->dccps_xmit_timer,
                        msecs_to_jiffies(err)+jiffies);
                break;
            } else
                err = dccp_wait_for_ccid(sk, skb, err);
            if (err && err != -EINTR)
                DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
        }

        skb_dequeue(&sk->sk_write_queue);
        if (err == 0) {
            struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
            const int len = skb->len;

            if (sk->sk_state == DCCP_PARTOPEN) {
                /* See 8.1.5.  Handshake Completion */
                inet_csk_schedule_ack(sk);
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                          inet_csk(sk)->icsk_rto,
                          DCCP_RTO_MAX);
                dcb->dccpd_type = DCCP_PKT_DATAACK;
            } else if (dccp_ack_pending(sk))
                dcb->dccpd_type = DCCP_PKT_DATAACK;
            else
                dcb->dccpd_type = DCCP_PKT_DATA;

            err = dccp_transmit_skb(sk, skb);
            ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
            if (err)
                DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
                     err);
        } else {
            dccp_pr_debug("packet discarded due to err=%d\n", err);
            kfree_skb(skb);
        }
    }
}
Example no. 6
/**
 * dccp_flush_write_queue  -  Drain queue at end of connection
 * @sk: socket whose write queue is to be drained
 * @time_budget: maximum number of jiffies to spend on draining; decremented
 *               in place by the time actually spent waiting
 *
 * Since dccp_sendmsg queues packets without waiting for them to be sent, it may
 * happen that the TX queue is not empty at the end of a connection. We give the
 * HC-sender CCID a grace period of up to @time_budget jiffies. If this function
 * returns with a non-empty write queue, it will be purged later.
 */
void dccp_flush_write_queue(struct sock *sk, long *time_budget)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	long delay, rc;

	while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
			/*
			 * If the CCID determines when to send, the next sending
			 * time is unknown or the CCID may not even send again
			 * (e.g. remote host crashes or lost Ack packets).
			 */
			DCCP_WARN("CCID did not manage to send all packets\n");
			return;
		case CCID_PACKET_DELAY:
			delay = msecs_to_jiffies(rc);
			if (delay > *time_budget)
				return;
			rc = dccp_wait_for_ccid(sk, delay);
			if (rc < 0)
				return;
			*time_budget -= (delay - rc);
			/* check again if we can send now */
			break;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			skb_dequeue(&sk->sk_write_queue);
			kfree_skb(skb);
			dccp_pr_debug("packet discarded due to err=%ld\n", rc);
		}
	}
}
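The switch above relies on the CCID return-code convention: ccid_hc_tx_send_packet() returns a negative errno, zero for "send at once", a small positive millisecond delay, or one of the sentinel codes. A sketch of the decoding helper from net/dccp/ccid.h of the same era follows (reconstructed from memory; the numeric values are illustrative and should be checked against the tree you build on):

enum ccid_dequeueing_decision {
	CCID_PACKET_SEND_AT_ONCE =	 0x00000,  /* "green light": no delay    */
	CCID_PACKET_DELAY_MAX =		 0x0FFFF,  /* largest msec delay value   */
	CCID_PACKET_DELAY =		 0x10000,  /* CCID requested msec delay  */
	CCID_PACKET_WILL_DEQUEUE_LATER = 0x20000,  /* CCID dequeues autonomously */
	CCID_PACKET_ERR =		 0x30000,  /* error condition            */
};

static inline int ccid_packet_dequeue_eval(const int return_code)
{
	if (return_code < 0)			/* errno: drop the packet      */
		return CCID_PACKET_ERR;
	if (return_code == 0)			/* may transmit immediately    */
		return CCID_PACKET_SEND_AT_ONCE;
	if (return_code <= CCID_PACKET_DELAY_MAX)
		return CCID_PACKET_DELAY;	/* rc itself is the msec delay */
	return return_code;			/* one of the sentinels above  */
}

This matches the CCID_PACKET_DELAY branch in dccp_flush_write_queue(), where rc is fed straight into msecs_to_jiffies().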