/**
 * adjust_sock_sync_prop() - apply synced properties to a socket.
 * @sk: socket whose sequence state is adjusted
 * @sync_p: sync properties to apply
 *
 * Rebases the sequence numbers of every skb still queued in the
 * write queue, then updates the socket's send and receive state
 * to match @sync_p.
 */
static int adjust_sock_sync_prop(struct sock *sk,
				 struct bst_sock_sync_prop *sync_p)
{
	struct tcp_sock *tp;
	struct sk_buff *skb;
	u32 seq_changed = 0;
	u32 new_seq;

	tp = tcp_sk(sk);
	new_seq = sync_p->seq;

	/*
	 * sync_p->seq is the synced sequence to rebase against. There
	 * may be more than one sk_buff in the TCP write queue; adjust
	 * them all.
	 */
	skb = tcp_write_queue_head(sk);
	while (skb) {
		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

		if (skb == tcp_write_queue_head(sk)) {
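			/*
			 * Compute the delta once, at the queue head. A
			 * new_seq numerically below the head's seq means
			 * the 32-bit sequence space has wrapped.
			 */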
			if (new_seq < tcb->seq) {
				seq_changed = BST_MAX_SEQ_VALUE - tcb->seq;
				seq_changed += new_seq;
			} else {
				seq_changed = new_seq - tcb->seq;
			}
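			/* Rebase new_seq onto this socket's write_seq. */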
			new_seq = tp->write_seq + seq_changed;
		}

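		/* Shift this skb's sequence range by the delta. */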
		tcb->seq += seq_changed;
		tcb->end_seq += seq_changed;

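		/* Walk forward, stopping after the last queued skb. */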
		if (!tcp_skb_is_last(sk, skb))
			skb = tcp_write_queue_next(sk, skb);
		else
			skb = NULL;
	}

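	/*
	 * If the write queue is empty, the loop above never ran; derive
	 * the delta directly from the current write_seq.
	 */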
	if (!tcp_write_queue_head(sk))
		seq_changed = new_seq - tp->write_seq;

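	/* Install the synced state; shift send-side marks by the delta. */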
	tp->write_seq = new_seq;
	tp->rcv_nxt = sync_p->rcv_nxt;
	tp->snd_wnd = sync_p->snd_wnd;
	tp->snd_nxt += seq_changed;

	tp->copied_seq = tp->rcv_nxt;
	tp->rcv_wup = tp->rcv_nxt;
	tp->snd_una += seq_changed;

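	/*
	 * If timestamps are in use, re-base the timestamp offset and the
	 * stored ts_recent against the synced values.
	 */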
	if (likely(tp->rx_opt.tstamp_ok)) {
		tp->tsoffset = sync_p->ts_current - tcp_time_stamp;

		bastet_store_ts_recent(tp, sync_p->ts_recent, sync_p->ts_recent_tick);
	}

	return 0;
}

/**
 * Clean the meta-socket retransmission queue and the reinject queue.
 * @meta_sk must be the meta-socket.
 */
static void mptcp_clean_rtx_queue(struct sock *meta_sk, u32 prior_snd_una)
{
	struct sk_buff *skb, *tmp;
	struct tcp_sock *meta_tp = tcp_sk(meta_sk);
	struct mptcp_cb *mpcb = meta_tp->mpcb;
	int acked = 0;

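	/* Pop every skb that is fully acked at the meta level. */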
	while ((skb = tcp_write_queue_head(meta_sk)) &&
	       skb != tcp_send_head(meta_sk)) {
		if (before(meta_tp->snd_una, TCP_SKB_CB(skb)->end_seq))
			break;

		tcp_unlink_write_queue(skb, meta_sk);

		if (mptcp_is_data_fin(skb)) {
			struct sock *sk_it;

			/* DATA_FIN has been acknowledged - now we can close
			 * the subflows
			 */
			mptcp_for_each_sk(mpcb, sk_it) {
				unsigned long delay = 0;

				/* If we are the passive closer, don't trigger
				 * the subflow FIN until the subflow has been
				 * FINed by the peer - thus we add a delay.
				 */
				if (mpcb->passive_close &&
				    sk_it->sk_state == TCP_ESTABLISHED)
					delay = inet_csk(sk_it)->icsk_rto << 3;

				mptcp_sub_close(sk_it, delay);
			}
		}

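		/* Account for the acked skb and free it. */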
		meta_tp->packets_out -= tcp_skb_pcount(skb);
		sk_wmem_free_skb(meta_sk, skb);

		acked = 1;
	}