static void dn_slow_timer(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;
	struct dn_scp *scp = DN_SK(sk);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
		goto out;
	}

	if (scp->persist && scp->persist_fxn) {
		if (scp->persist <= SLOW_INTERVAL) {
			scp->persist = 0;

			if (scp->persist_fxn(sk))
				goto out;
		} else {
			scp->persist -= SLOW_INTERVAL;
		}
	}

	if (scp->keepalive && scp->keepalive_fxn && (scp->state == DN_RUN)) {
		if ((jiffies - scp->stamp) >= scp->keepalive)
			scp->keepalive_fxn(sk);
	}

	sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
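All of these handlers end with sock_put() because sk_reset_timer() takes a reference on the socket whenever it arms a previously idle timer. A minimal sketch of the two core helpers, as they appear in net/core/sock.c of this kernel generation:

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	/* mod_timer() returns 0 when the timer was not already pending,
	 * i.e. we are the ones arming it, so take a reference that the
	 * timer handler later drops with sock_put(). */
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	/* Drop the reference taken when the timer was armed. */
	if (timer_pending(timer) && del_timer(timer))
		__sock_put(sk);
}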
Example 2
File: output.c Project: 274914765/C
/* FIXME: Is this still necessary (11.3)? Currently not used anywhere by DCCP. */
void dccp_send_delayed_ack(struct sock *sk)
{
    struct inet_connection_sock *icsk = inet_csk(sk);
    /*
     * FIXME: tune this timer. elapsed time fixes the skew, so no problem
     * with using 2s, and active senders also piggyback the ACK into a
     * DATAACK packet, so this is really for quiescent senders.
     */
    unsigned long timeout = jiffies + 2 * HZ;

    /* Use the new timeout only if there wasn't an earlier one pending. */
    if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
        /* If delack timer was blocked or is about to expire,
         * send ACK now.
         *
         * FIXME: check the "about to expire" part
         */
        if (icsk->icsk_ack.blocked) {
            dccp_send_ack(sk);
            return;
        }

        if (!time_before(timeout, icsk->icsk_ack.timeout))
            timeout = icsk->icsk_ack.timeout;
    }
    icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
    icsk->icsk_ack.timeout = timeout;
    sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
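The ICSK_ACK_TIMER and icsk_ack.blocked bookkeeping above is consumed by the delayed-ACK timer handler. A trimmed sketch of that handler, assuming the net/dccp/timer.c of the same era (SNMP counters and ATO tuning omitted):

static void dccp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Socket busy: note that we were blocked and retry soon. */
		icsk->icsk_ack.blocked = 1;
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       jiffies + TCP_DELACK_MIN);
		goto out;
	}

	if (sk->sk_state == DCCP_CLOSED ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;
	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		/* Not due yet: re-arm for the exact timeout. */
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       icsk->icsk_ack.timeout);
		goto out;
	}

	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
	if (inet_csk_ack_scheduled(sk))
		dccp_send_ack(sk);
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}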
Example 3
void dccp_write_xmit(struct sock *sk, int block)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	while ((skb = skb_peek(&sk->sk_write_queue))) {
		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		if (err > 0) {
			if (!block) {
				sk_reset_timer(sk, &dp->dccps_xmit_timer,
						msecs_to_jiffies(err)+jiffies);
				break;
			} else
				err = dccp_wait_for_ccid(sk, skb, err);
			if (err && err != -EINTR)
				DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
		}

		skb_dequeue(&sk->sk_write_queue);
		if (err == 0) {
			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
			const int len = skb->len;

			if (sk->sk_state == DCCP_PARTOPEN) {
				const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
				/*
				 * See 8.1.5 - Handshake Completion.
				 *
				 * For robustness we resend Confirm options until the client has
				 * entered OPEN. During the initial feature negotiation, the MPS
				 * is smaller than usual, reduced by the Change/Confirm options.
				 */
				if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
					DCCP_WARN("Payload too large (%d) for featneg.\n", len);
					dccp_send_ack(sk);
					dccp_feat_list_purge(&dp->dccps_featneg);
				}

				inet_csk_schedule_ack(sk);
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  inet_csk(sk)->icsk_rto,
						  DCCP_RTO_MAX);
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			} else if (dccp_ack_pending(sk))
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			else
				dcb->dccpd_type = DCCP_PKT_DATA;

			err = dccp_transmit_skb(sk, skb);
			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
			if (err)
				DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
					 err);
		} else {
			dccp_pr_debug("packet discarded due to err=%d\n", err);
			kfree_skb(skb);
		}
	}
}
static void setup_sock_sync_close_timer(struct sock *sk)
{
	struct bastet_sock *bsk = sk->bastet;

	bsk->bastet_timer_event = BST_TMR_CLOSE_SOCK;

	sk_reset_timer(sk, &bsk->bastet_timer, jiffies + BST_SKIP_SOCK_OWNER_TIME);
}
Example 5
static void ccid2_start_rto_timer(struct sock *sk)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	ccid2_pr_debug("setting RTO timeout=%ld\n", hc->tx_rto);

	BUG_ON(timer_pending(&hc->tx_rtotimer));
	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
}
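For context, the RTO timer armed here is normally bound to its handler once in the CCID's init path and stopped again on exit. A sketch under the assumption of the same hc->tx_rtotimer naming generation of ccid2.c (the _sketch helpers are illustrative, not the exact upstream functions):

static int ccid2_hc_tx_init_sketch(struct ccid *ccid, struct sock *sk)
{
	struct ccid2_hc_tx_sock *hc = ccid_priv(ccid);

	/* Bind the per-socket RTO timer to its expiry handler. */
	setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
		    (unsigned long)sk);
	return 0;
}

static void ccid2_hc_tx_exit_sketch(struct sock *sk)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	/* Stop the timer and drop the reference sk_reset_timer() took. */
	sk_stop_timer(sk, &hc->tx_rtotimer);
}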
/*
 * Set up the bastet sock request sync timer,
 * ensuring the user sock becomes valid after a while.
 */
static void setup_sock_sync_request_timer(struct sock *sk, bool retry)
{
	struct bastet_sock *bsk = sk->bastet;

	bsk->sync_retry = retry;
	bsk->bastet_timer_event = BST_TMR_REQ_SOCK_SYNC;
	bsk->bastet_timeout = jiffies + BST_REQ_SOCK_SYNC_TIME;

	sk_reset_timer(sk, &bsk->bastet_timer, bsk->bastet_timeout);
}
/*
 * Set up the bastet sock delay sync timer.
 */
static void setup_sock_sync_delay_timer(struct sock *sk)
{
	struct bastet_sock *bsk = sk->bastet;

	BASTET_LOGI("delay_time=%ld", bsk->delay_sync_time_section);
	bsk->bastet_timer_event = BST_TMR_DELAY_SOCK_SYNC;
	bsk->bastet_timeout = bsk->last_sock_active_time_point + bsk->delay_sync_time_section;

	bastet_wakelock_acquire_timeout(bsk->delay_sync_time_section + BST_WAKELOCK_TIMEOUT);
	sk_reset_timer(sk, &bsk->bastet_timer, bsk->bastet_timeout);
}
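These setup helpers only record an event in bsk->bastet_timer_event and re-arm one shared timer; that timer is presumably bound once per socket to bastet_sock_bastet_timeout() (shown further below). A hypothetical sketch of such an initialisation (the _sketch helper is an assumption, not taken from the driver):

static void bastet_sock_timer_init_sketch(struct sock *sk)
{
	struct bastet_sock *bsk = sk->bastet;

	/* Assumed one-time binding of the shared bastet timer. */
	bsk->bastet_timer_event = BST_TMR_EVT_INVALID;
	setup_timer(&bsk->bastet_timer, bastet_sock_bastet_timeout,
		    (unsigned long)sk);
}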
Example 9
void dccp_write_xmit(struct sock *sk, int block)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	long timeo = 30000; 	/* If a packet is taking longer than this
				   to transmit, we have other issues */

	while ((skb = skb_peek(&sk->sk_write_queue))) {
		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
					 skb->len);

		if (err > 0) {
			if (!block) {
				sk_reset_timer(sk, &dp->dccps_xmit_timer,
						msecs_to_jiffies(err)+jiffies);
				break;
			} else
				err = dccp_wait_for_ccid(sk, skb, &timeo);
			if (err) {
				printk(KERN_CRIT "%s:err at dccp_wait_for_ccid"
						 " %d\n", __FUNCTION__, err);
				dump_stack();
			}
		}

		skb_dequeue(&sk->sk_write_queue);
		if (err == 0) {
			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
			const int len = skb->len;

			if (sk->sk_state == DCCP_PARTOPEN) {
				/* See 8.1.5.  Handshake Completion */
				inet_csk_schedule_ack(sk);
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  inet_csk(sk)->icsk_rto,
						  DCCP_RTO_MAX);
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			} else if (dccp_ack_pending(sk))
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			else
				dcb->dccpd_type = DCCP_PKT_DATA;

			err = dccp_transmit_skb(sk, skb);
			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
			if (err) {
				printk(KERN_CRIT "%s:err from "
					         "ccid_hc_tx_packet_sent %d\n",
					         __FUNCTION__, err);
				dump_stack();
			}
		} else
			kfree_skb(skb);
	}
}
Example 10
static void dccp_write_xmit_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct dccp_sock *dp = dccp_sk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies+1);
	else
		dccp_write_xmit(sk, 0);
	bh_unlock_sock(sk);
	sock_put(sk);
}
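The handler above assumes dccps_xmit_timer was bound to it once when the socket's timers were initialised; a sketch of that binding, assuming the setup_timer()-era dccp timer code:

static void dccp_init_write_xmit_timer(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	/* One-time binding of the xmit timer to dccp_write_xmit_timer(). */
	setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer,
		    (unsigned long)sk);
}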
Example 11
static void ccid2_hc_tx_rto_expire(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
	long s;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		sk_reset_timer(sk, &hctx->ccid2hctx_rtotimer,
			       jiffies + HZ / 5);
		goto out;
	}

	ccid2_pr_debug("RTO_EXPIRE\n");

	ccid2_hc_tx_check_sanity(hctx);

	/* back-off timer */
	hctx->ccid2hctx_rto <<= 1;

	s = hctx->ccid2hctx_rto / HZ;
	if (s > 60)
		hctx->ccid2hctx_rto = 60 * HZ;

	ccid2_start_rto_timer(sk);

	/* adjust pipe, cwnd etc */
	hctx->ccid2hctx_pipe = 0;
	hctx->ccid2hctx_ssthresh = hctx->ccid2hctx_cwnd >> 1;
	if (hctx->ccid2hctx_ssthresh < 2)
		hctx->ccid2hctx_ssthresh = 2;
	ccid2_change_cwnd(sk, 1);

	/* clear state about stuff we sent */
	hctx->ccid2hctx_seqt	= hctx->ccid2hctx_seqh;
	hctx->ccid2hctx_ssacks	= 0;
	hctx->ccid2hctx_acks	= 0;
	hctx->ccid2hctx_sent	= 0;

	/* clear ack ratio state. */
	hctx->ccid2hctx_arsent	 = 0;
	hctx->ccid2hctx_ackloss  = 0;
	hctx->ccid2hctx_rpseq	 = 0;
	hctx->ccid2hctx_rpdupack = -1;
	ccid2_change_l_ack_ratio(sk, 1);
	ccid2_hc_tx_check_sanity(hctx);
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
/*
 * Set up the sock sync set timer.
 */
static int setup_sock_sync_set_timer(struct sock *sk, struct bst_sock_sync_prop *sync_p)
{
	struct bastet_sock *bsk = sk->bastet;

	bsk->sync_p = kmalloc(sizeof(*sync_p), GFP_KERNEL);
	if (NULL == bsk->sync_p) {
		return -ENOMEM;
	}

	memcpy(bsk->sync_p, sync_p, sizeof(*sync_p));

	bsk->bastet_timer_event = BST_TMR_SET_SOCK_SYNC;

	sk_reset_timer(sk, &bsk->bastet_timer, jiffies + BST_SKIP_SOCK_OWNER_TIME);
	return 0;
}
Example 13
File: output.c Project: 274914765/C
void dccp_write_xmit(struct sock *sk, int block)
{
    struct dccp_sock *dp = dccp_sk(sk);
    struct sk_buff *skb;

    while ((skb = skb_peek(&sk->sk_write_queue))) {
        int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

        if (err > 0) {
            if (!block) {
                sk_reset_timer(sk, &dp->dccps_xmit_timer,
                        msecs_to_jiffies(err)+jiffies);
                break;
            } else
                err = dccp_wait_for_ccid(sk, skb, err);
            if (err && err != -EINTR)
                DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
        }

        skb_dequeue(&sk->sk_write_queue);
        if (err == 0) {
            struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
            const int len = skb->len;

            if (sk->sk_state == DCCP_PARTOPEN) {
                /* See 8.1.5.  Handshake Completion */
                inet_csk_schedule_ack(sk);
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                          inet_csk(sk)->icsk_rto,
                          DCCP_RTO_MAX);
                dcb->dccpd_type = DCCP_PKT_DATAACK;
            } else if (dccp_ack_pending(sk))
                dcb->dccpd_type = DCCP_PKT_DATAACK;
            else
                dcb->dccpd_type = DCCP_PKT_DATA;

            err = dccp_transmit_skb(sk, skb);
            ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
            if (err)
                DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
                     err);
        } else {
            dccp_pr_debug("packet discarded due to err=%d\n", err);
            kfree_skb(skb);
        }
    }
}
/*
 * Bastet sock timer callback; handles all bastet timer events.
 */
static void bastet_sock_bastet_timeout(unsigned long data)
{
	int event;
	struct sock *sk = (struct sock *)data;
	struct bastet_sock *bsk = sk->bastet;

	BASTET_LOGI("sk: %p time event: %d", sk, bsk->bastet_timer_event);

	bh_lock_sock(sk);

	/* Read the event while holding the lock */
	event = bsk->bastet_timer_event;

	if (sock_owned_by_user(sk)) {
		/* Try again later */
		if (BST_TMR_DELAY_SOCK_SYNC == event) {
			bastet_wakelock_acquire_timeout(BST_SKIP_SOCK_OWNER_TIME + BST_WAKELOCK_TIMEOUT);
		}
		sk_reset_timer(sk, &bsk->bastet_timer, jiffies + BST_SKIP_SOCK_OWNER_TIME);
		goto out_unlock;
	}

	switch (event) {
	case BST_TMR_REQ_SOCK_SYNC:
		request_sock_bastet_timeout(sk);
		break;
	case BST_TMR_SET_SOCK_SYNC:
		set_sock_bastet_timeout(sk);
		break;
	case BST_TMR_DELAY_SOCK_SYNC:
		delay_sock_bastet_timeout(sk);
		break;
	case BST_TMR_CLOSE_SOCK:
		close_sock_bastet_timeout(sk);
		break;
	default:
		BASTET_LOGE("sk: %p invalid time event: %d", sk, event);
		break;
	}

	sk_mem_reclaim(sk);
out_unlock:
	bh_unlock_sock(sk);
	sock_put(sk);
}
/*
 * BST_TMR_REQ_SOCK_SYNC timeout.
 * The request sync time is up, but the sock sync properties are still invalid.
 * Normally the daemon sets the sock sync properties before this timeout.
 */
static void request_sock_bastet_timeout(struct sock *sk)
{
	struct bastet_sock *bsk = sk->bastet;

	/* Timer fired early: re-arm for the exact timeout */
	if (time_after(bsk->bastet_timeout, jiffies)) {
		sk_reset_timer(sk, &bsk->bastet_timer, bsk->bastet_timeout);
		return;
	}

	/* We must reset the timer event here; bastet_delay_sock_sync_notify
	 * depends on it. This must come after the early-fire check above.
	 */
	bsk->bastet_timer_event = BST_TMR_EVT_INVALID;

	if (BST_SOCK_UPDATING != bsk->bastet_sock_state){
		BASTET_LOGE("sk: %p state: %d not expected", sk, bsk->bastet_sock_state);
		return;
	}

	/* Try the request timer again */
	if (bsk->sync_retry) {
		request_sock_sync(sk);
		return;
	}

	/* If we get here, bastet sock sync failed;
	 * send or recv data anyway. */
	BASTET_LOGE("sk: %p request timeout", sk);

	if (BST_USER_START == bsk->user_ctrl) {
		/* Set state to BST_SOCK_VALID before sending or receiving data */
		bsk->bastet_sock_state = BST_SOCK_VALID;
		process_sock_send_and_recv(sk);
	} else {
		bsk->bastet_sock_state = BST_SOCK_NOT_USED;
	}
}
Example 17
void dccp_write_xmit(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	while ((skb = dccp_qpolicy_top(sk))) {
		int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
			return;
		case CCID_PACKET_DELAY:
			sk_reset_timer(sk, &dp->dccps_xmit_timer,
				       jiffies + msecs_to_jiffies(rc));
			return;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			dccp_qpolicy_drop(sk, skb);
			dccp_pr_debug("packet discarded due to err=%d\n", rc);
		}
	}
}
Example 18
void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
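Its counterpart in the same driver simply stops the timer; a sketch of that helper:

void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sk %p state %d", sk, sk->sk_state);
	/* Cancel sk_timer and drop the reference taken when it was armed. */
	sk_stop_timer(sk, &sk->sk_timer);
}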
Example 19
static int rst_socket_tcp(struct cpt_sock_image *si, loff_t pos, struct sock *sk,
			  struct cpt_context *ctx)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	tp->pred_flags = si->cpt_pred_flags;
	tp->rcv_nxt = si->cpt_rcv_nxt;
	tp->snd_nxt = si->cpt_snd_nxt;
	tp->snd_una = si->cpt_snd_una;
	tp->snd_sml = si->cpt_snd_sml;
	tp->rcv_tstamp = tcp_jiffies_import(si->cpt_rcv_tstamp);
	tp->lsndtime = tcp_jiffies_import(si->cpt_lsndtime);
	tp->tcp_header_len = si->cpt_tcp_header_len;
	inet_csk(sk)->icsk_ack.pending = si->cpt_ack_pending;
	inet_csk(sk)->icsk_ack.quick = si->cpt_quick;
	inet_csk(sk)->icsk_ack.pingpong = si->cpt_pingpong;
	inet_csk(sk)->icsk_ack.blocked = si->cpt_blocked;
	inet_csk(sk)->icsk_ack.ato = si->cpt_ato;
	inet_csk(sk)->icsk_ack.timeout = jiffies_import(si->cpt_ack_timeout);
	inet_csk(sk)->icsk_ack.lrcvtime = tcp_jiffies_import(si->cpt_lrcvtime);
	inet_csk(sk)->icsk_ack.last_seg_size = si->cpt_last_seg_size;
	inet_csk(sk)->icsk_ack.rcv_mss = si->cpt_rcv_mss;
	tp->snd_wl1 = si->cpt_snd_wl1;
	tp->snd_wnd = si->cpt_snd_wnd;
	tp->max_window = si->cpt_max_window;
	inet_csk(sk)->icsk_pmtu_cookie = si->cpt_pmtu_cookie;
	tp->mss_cache = si->cpt_mss_cache;
	tp->rx_opt.mss_clamp = si->cpt_mss_clamp;
	inet_csk(sk)->icsk_ext_hdr_len = si->cpt_ext_header_len;
	inet_csk(sk)->icsk_ca_state = si->cpt_ca_state;
	inet_csk(sk)->icsk_retransmits = si->cpt_retransmits;
	tp->reordering = si->cpt_reordering;
	tp->frto_counter = si->cpt_frto_counter;
	tp->frto_highmark = si->cpt_frto_highmark;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
	// // tp->adv_cong = si->cpt_adv_cong;
#endif
	inet_csk(sk)->icsk_accept_queue.rskq_defer_accept = si->cpt_defer_accept;
	inet_csk(sk)->icsk_backoff = si->cpt_backoff;
	tp->srtt = si->cpt_srtt;
	tp->mdev = si->cpt_mdev;
	tp->mdev_max = si->cpt_mdev_max;
	tp->rttvar = si->cpt_rttvar;
	tp->rtt_seq = si->cpt_rtt_seq;
	inet_csk(sk)->icsk_rto = si->cpt_rto;
	tp->packets_out = si->cpt_packets_out;
	tp->retrans_out = si->cpt_retrans_out;
	tp->lost_out = si->cpt_lost_out;
	tp->sacked_out = si->cpt_sacked_out;
	tp->fackets_out = si->cpt_fackets_out;
	tp->snd_ssthresh = si->cpt_snd_ssthresh;
	tp->snd_cwnd = si->cpt_snd_cwnd;
	tp->snd_cwnd_cnt = si->cpt_snd_cwnd_cnt;
	tp->snd_cwnd_clamp = si->cpt_snd_cwnd_clamp;
	tp->snd_cwnd_used = si->cpt_snd_cwnd_used;
	tp->snd_cwnd_stamp = tcp_jiffies_import(si->cpt_snd_cwnd_stamp);
	inet_csk(sk)->icsk_timeout = tcp_jiffies_import(si->cpt_timeout);
	tp->rcv_wnd = si->cpt_rcv_wnd;
	tp->rcv_wup = si->cpt_rcv_wup;
	tp->write_seq = si->cpt_write_seq;
	tp->pushed_seq = si->cpt_pushed_seq;
	tp->copied_seq = si->cpt_copied_seq;
	tp->rx_opt.tstamp_ok = si->cpt_tstamp_ok;
	tp->rx_opt.wscale_ok = si->cpt_wscale_ok;
	tp->rx_opt.sack_ok = si->cpt_sack_ok;
	tp->rx_opt.saw_tstamp = si->cpt_saw_tstamp;
	tp->rx_opt.snd_wscale = si->cpt_snd_wscale;
	tp->rx_opt.rcv_wscale = si->cpt_rcv_wscale;
	tp->nonagle = si->cpt_nonagle;
	tp->keepalive_probes = si->cpt_keepalive_probes;
	tp->rx_opt.rcv_tsval = si->cpt_rcv_tsval;
	tp->rx_opt.rcv_tsecr = si->cpt_rcv_tsecr;
	tp->rx_opt.ts_recent = si->cpt_ts_recent;
	tp->rx_opt.ts_recent_stamp = si->cpt_ts_recent_stamp;
	tp->rx_opt.user_mss = si->cpt_user_mss;
	tp->rx_opt.dsack = si->cpt_dsack;
	tp->duplicate_sack[0].start_seq = si->cpt_sack_array[0];
	tp->duplicate_sack[0].end_seq = si->cpt_sack_array[1];
	tp->selective_acks[0].start_seq = si->cpt_sack_array[2];
	tp->selective_acks[0].end_seq = si->cpt_sack_array[3];
	tp->selective_acks[1].start_seq = si->cpt_sack_array[4];
	tp->selective_acks[1].end_seq = si->cpt_sack_array[5];
	tp->selective_acks[2].start_seq = si->cpt_sack_array[6];
	tp->selective_acks[2].end_seq = si->cpt_sack_array[7];
	tp->selective_acks[3].start_seq = si->cpt_sack_array[8];
	tp->selective_acks[3].end_seq = si->cpt_sack_array[9];

	tp->window_clamp = si->cpt_window_clamp;
	tp->rcv_ssthresh = si->cpt_rcv_ssthresh;
	inet_csk(sk)->icsk_probes_out = si->cpt_probes_out;
	tp->rx_opt.num_sacks = si->cpt_num_sacks;
	tp->advmss = si->cpt_advmss;
	inet_csk(sk)->icsk_syn_retries = si->cpt_syn_retries;
	tp->ecn_flags = si->cpt_ecn_flags;
	tp->prior_ssthresh = si->cpt_prior_ssthresh;
	tp->high_seq = si->cpt_high_seq;
	tp->retrans_stamp = si->cpt_retrans_stamp;
	tp->undo_marker = si->cpt_undo_marker;
	tp->undo_retrans = si->cpt_undo_retrans;
	tp->urg_seq = si->cpt_urg_seq;
	tp->urg_data = si->cpt_urg_data;
	inet_csk(sk)->icsk_pending = si->cpt_pending;
	tp->snd_up = si->cpt_snd_up;
	tp->keepalive_time = si->cpt_keepalive_time;
	tp->keepalive_intvl = si->cpt_keepalive_intvl;
	tp->linger2 = si->cpt_linger2;

	sk->sk_send_head = NULL;
	for (skb = skb_peek(&sk->sk_write_queue);
	     skb && skb != (struct sk_buff*)&sk->sk_write_queue;
	     skb = skb->next) {
		if (!after(tp->snd_nxt, TCP_SKB_CB(skb)->seq)) {
			sk->sk_send_head = skb;
			break;
		}
	}

	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN) {
		struct inet_sock *inet = inet_sk(sk);
		if (inet->num == 0) {
			cpt_object_t *lobj = NULL;

			if ((int)si->cpt_parent != -1)
				lobj = lookup_cpt_obj_byindex(CPT_OBJ_SOCKET, si->cpt_parent, ctx);

			if (lobj && lobj->o_obj) {
				inet->num = ntohs(inet->sport);
				local_bh_disable();
				__inet_inherit_port(lobj->o_obj, sk);
				local_bh_enable();
				dprintk_ctx("port inherited from parent\n");
			} else {
				struct sock *lsk = find_parent(inet->sport, ctx);
				if (lsk) {
					inet->num = ntohs(inet->sport);
					local_bh_disable();
					__inet_inherit_port(lsk, sk);
					local_bh_enable();
					dprintk_ctx("port inherited\n");
				} else {
					eprintk_ctx("we are kinda lost...\n");
				}
			}
		}

		sk->sk_prot->hash(sk);

		if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
			sk_reset_timer(sk, &inet_csk(sk)->icsk_delack_timer, inet_csk(sk)->icsk_ack.timeout);
		if (inet_csk(sk)->icsk_pending)
			sk_reset_timer(sk, &inet_csk(sk)->icsk_retransmit_timer,
				       inet_csk(sk)->icsk_timeout);
		if (sock_flag(sk, SOCK_KEEPOPEN)) {
			unsigned long expires = jiffies_import(si->cpt_ka_timeout);
			if (time_after(jiffies, expires))
				expires = jiffies + HZ;
			sk_reset_timer(sk, &sk->sk_timer, expires);
		}
	}

	if (sk->sk_family == AF_INET6)
		sk->sk_gso_type = SKB_GSO_TCPV6;
	else
		sk->sk_gso_type = SKB_GSO_TCPV4;

	return 0;
}
/*
 * BST_TMR_DELAY_SOCK_SYNC timeout.
 * If the sock is ready, collect its sync properties and post them to the daemon.
 */
static void delay_sock_bastet_timeout(struct sock *sk)
{
	int err;
	struct bst_set_sock_sync_prop sock_p;
	struct bastet_sock *bsk = sk->bastet;

	/* Timer fired early: re-arm for the exact timeout */
	if (time_after(bsk->bastet_timeout, jiffies)) {
		sk_reset_timer(sk, &bsk->bastet_timer, bsk->bastet_timeout);
		return;
	}

	/* We must reset the timer event here; bastet_delay_sock_sync_notify
	 * depends on it. This must come after the early-fire check above.
	 */
	bsk->bastet_timer_event = BST_TMR_EVT_INVALID;

	/* In repair mode, or if userspace needs repair, do not sync the sock */
	if (unlikely(tcp_sk(sk)->repair || bsk->need_repair)) {
		BASTET_LOGE("sk: %p in repair mode", sk);
		return;
	}

	if (TCP_ESTABLISHED != sk->sk_state) {
		BASTET_LOGE("sk: %p sk_state is not TCP_ESTABLISHED", sk);
		return;
	}

	if (BST_SOCK_VALID != bsk->bastet_sock_state) {
		BASTET_LOGE("sk: %p state: %d not expected", sk, bsk->bastet_sock_state);
		return;
	}

	/* The sock owner has used the sock since the last setup */
	if (time_after(bsk->last_sock_active_time_point + bsk->delay_sync_time_section, jiffies)) {
		setup_sock_sync_delay_timer(sk);
		return;
	}

	/* The sock owner has unacked data in flight;
	 * an incoming ack will trigger the delay timer again */
	if (!tcp_write_queue_empty(sk)) {
		BASTET_LOGI("sk: %p has sent data not acked", sk);
		post_indicate_packet(BST_IND_TRIGGER_THAW, &bsk->pid, sizeof(pid_t));
		return;
	}

	/* The sock owner has data to receive, so do not sync.
	 * If the owner takes no receive action,
	 * the delay timer should be stopped. */
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		BASTET_LOGI("sk: %p has received data in queue", sk);
		bsk->last_sock_active_time_point = jiffies;
		setup_sock_sync_delay_timer(sk);
		post_indicate_packet(BST_IND_TRIGGER_THAW, &bsk->pid, sizeof(pid_t));
		return;
	}

	memset(&sock_p, 0, sizeof(struct bst_set_sock_sync_prop));
	bastet_get_comm_prop(sk, &sock_p.guide);
	bastet_get_sock_prop(sk, &sock_p.sync_prop);

	err = post_indicate_packet(BST_IND_SOCK_SYNC_PROP, &sock_p, sizeof(sock_p));
	if (!err) {
		/* post succeeded */
		bsk->bastet_sock_state = BST_SOCK_INVALID;
	}
}
void dn_start_slow_timer(struct sock *sk)
{
	setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
}
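The matching teardown just stops the timer, dropping the reference sk_reset_timer() took when it was armed; a sketch matching the DECnet code of this era:

void dn_stop_slow_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}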