/*
 * psd_tx_resume - kick the PSD data path if packets are still queued
 */
void psd_tx_resume(void)
{
	DP_ENTER();

	if (skb_queue_len(&tx_q))
		data_path_schedule_tx(psd_dp);

	DP_LEAVE();
}
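
/*
 * data_path_xmit - queue one packet on the data path at the given priority
 *
 * Returns dp_success if the data path is open and the packet was queued
 * (TX is scheduled right away); otherwise returns dp_not_open. Note that
 * the skb is neither queued nor freed when the path is not open.
 */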
enum data_path_result data_path_xmit(struct data_path *dp,
				     struct sk_buff *skb,
				     enum data_path_priority prio)
{
	enum data_path_result ret = dp_not_open;

	if (dp && atomic_read(&dp->state) == dp_state_opened) {
		tx_q_enqueue(dp, skb, prio);
		data_path_schedule_tx(dp);
		ret = dp_success;
	}

	return ret;
}
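
/*
 * dp_rb_resume_cb - shared-memory ring buffer resume callback
 *
 * Called when CP resumes the TX ring buffer. Holds a wakeup event so the
 * system stays awake long enough to drain the queue, then reschedules TX,
 * which also takes care of resuming the upper layer.
 */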
void dp_rb_resume_cb(struct shm_rbctl *rbctl)
{
	struct data_path *dp;

	if (!rbctl)
		return;

	shm_rb_resume(rbctl);

	dp = rbctl->priv;

	__pm_wakeup_event(&dp_acipc_wakeup, 2000);
	pr_warn("MSOCK: dp_rb_resume_cb!!!\n");

	if (dp && (atomic_read(&dp->state) == dp_state_opened)) {
		/* no need to check the queue length here,
		 * as the upper layer is resumed in tx_func anyway */
		data_path_schedule_tx(dp);
	}
}
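
/*
 * sendPSDData - queue one PSD packet for transmission
 *
 * Prepends a struct pduhdr carrying the channel id and payload length and
 * pads the packet to the required alignment, reallocating the skb when
 * there is not enough head/tail room. The packet is dropped if the link
 * is down; if tx_q is full, the oldest queued packet is dropped to make
 * room. Finally the packet is queued on tx_q and the PSD data path TX is
 * scheduled.
 */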
void sendPSDData(int cid, struct sk_buff *skb)
{
	struct pduhdr *hdr;
	struct sk_buff *skb2;
	unsigned len;
	unsigned tailpad;
	unsigned long flags;

	len = skb->len;
	tailpad = padding_size(sizeof *hdr + len);

	if (likely(!skb_cloned(skb))) {
		int headroom = skb_headroom(skb);
		int tailroom = skb_tailroom(skb);

		/* enough room as-is? */
		if (likely(sizeof *hdr + tailpad <= headroom + tailroom)) {
			/* data does not need to be moved */
			if (sizeof *hdr <= headroom && tailpad <= tailroom)
				goto fill;

			skb->data = memmove(skb->head + sizeof *hdr,
				skb->data, len);
			skb_set_tail_pointer(skb, len);
			goto fill;
		}
	}

	/* create a new skb, with the correct size (and tailpad) */
	skb2 = skb_copy_expand(skb, sizeof *hdr, tailpad + 1, GFP_ATOMIC);
	dev_kfree_skb_any(skb);
	if (unlikely(!skb2))
		return;
	skb = skb2;

	/* fill out the pdu header */
fill:
	hdr = (void *) __skb_push(skb, sizeof *hdr);
	memset(hdr, 0, sizeof *hdr);
	hdr->length = cpu_to_be16(len);
	hdr->cid = cid;
	memset(skb_put(skb, tailpad), 0, tailpad);

	/* drop the packet if the link is down */
	if (!data_path_is_link_up()) {
		dev_kfree_skb_any(skb);
		return;
	}

	spin_lock_irqsave(&tx_q_lock, flags);
	/* if the queue is full, drop the oldest packet to make room */
	if (skb_queue_len(&tx_q) >= PSD_TX_QUEUE_MAX_LEN) {
		struct sk_buff *first = skb_dequeue(&tx_q);
		dev_kfree_skb_any(first);
	}

	skb_queue_tail(&tx_q, skb);
	spin_unlock_irqrestore(&tx_q_lock, flags);

	data_path_schedule_tx(psd_dp);
}
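
/*
 * data_path_tx_func - data path TX worker, run via data_path_schedule_tx()
 *
 * Drains up to max_tx_shots ring-buffer slots from the priority TX queues.
 * Consecutive packets are packed ("piggybacked") into the pending slot
 * until it is full; each finished slot is published by advancing
 * skctl->ap_wptr, and CP is interrupted once at the end of the run. The
 * function also handles AP-side flow control: it notifies the upper layer
 * when the ring buffer stops, and resumes it once the ring buffer is no
 * longer stopped and the local queue is empty.
 */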
static void data_path_tx_func(unsigned long arg)
{
	struct data_path *dp = (struct data_path *)arg;
	struct shm_rbctl *rbctl = dp->rbctl;
	struct shm_skctl *skctl = rbctl->skctl_va;
	struct shm_psd_skhdr *skhdr;
	struct sk_buff *packet;
	int slot = 0;
	int pending_slot;
	int free_slots;
	int prio;
	int remain_bytes;
	int used_bytes;
	int consumed_slot = 0;
	int consumed_packets = 0;
	int start_q_len;
	int max_tx_shots = dp->max_tx_shots;

	pending_slot = -1;
	remain_bytes = rbctl->tx_skbuf_size - sizeof(struct shm_psd_skhdr);
	used_bytes = 0;

	start_q_len = tx_q_length(dp);

	dp->stat.tx_sched_cnt++;

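	/* pack queued packets into TX slots, at most max_tx_shots slots per run */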
	while (consumed_slot < max_tx_shots) {
		if (!cp_is_synced) {
			tx_q_clean(dp);
			break;
		}

		free_slots = shm_free_tx_skbuf(rbctl);
		if (free_slots == 0) {
			/*
			 * Notify CP only if we still have packets in the
			 * queue; otherwise simply break.
			 * Also check the current flow-control state: if
			 * tx_stopped has already been sent to CP, do not
			 * interrupt CP again; it is useless and only makes
			 * CP busier.
			 * Note: there may be a race condition here, but as
			 * the CP side has a watermark for the resume
			 * interrupt, we can assume it is safe.
			 */
			if (tx_q_length(dp) && !rbctl->is_ap_xmit_stopped) {
				shm_notify_ap_tx_stopped(rbctl);
				acipc_notify_ap_psd_tx_stopped();
			}
			break;
		} else if (free_slots == 1 && pending_slot != -1) {
			/*
			 * The only slot left is our pending slot; check
			 * whether it still has enough free space for the
			 * next packet.
			 */
			packet = tx_q_peek(dp, NULL);
			if (!packet)
				break;

			/* the packet is too large; notify CP and break */
			if (padded_size(packet->len) > remain_bytes &&
				!rbctl->is_ap_xmit_stopped) {
				shm_notify_ap_tx_stopped(rbctl);
				acipc_notify_ap_psd_tx_stopped();
				break;
			}
		}

		packet = tx_q_dequeue(dp, &prio);

		if (!packet)
			break;

		/* push to ring buffer */

		/* we have one slot pending */
		if (pending_slot != -1) {
			/*
			 * The packet does not fit in the pending slot;
			 * send out the pending slot first.
			 */
			if (padded_size(packet->len) > remain_bytes) {
				shm_flush_dcache(rbctl,
						SHM_PACKET_PTR(rbctl->tx_va,
							pending_slot,
							rbctl->tx_skbuf_size),
						used_bytes + sizeof(struct shm_psd_skhdr));
				skctl->ap_wptr = pending_slot;
				pending_slot = -1;
				consumed_slot++;
				dp->stat.tx_slots++;
				dp->stat.tx_free_bytes += remain_bytes;
				dp->stat.tx_used_bytes += used_bytes;
			} else
				slot = pending_slot;
		}

		/*
		 * Each priority has a hard limit so that higher priority
		 * packets are not starved by lower priority ones. Once the
		 * limit is reached, only higher priority packets may be
		 * sent. On the other hand, if this packet still fits into
		 * our pending slot, allow it anyway (piggyback).
		 */
		if (!has_enough_free_tx_slot(dp, free_slots, prio) &&
			((pending_slot == -1) || !dp->enable_piggyback)) {
			/* push the packet back and schedule a delayed tx */
			tx_q_queue_head(dp, packet, prio);
			__data_path_schedule_tx(dp, true);
			dp->stat.tx_force_sched_cnt++;
			break;
		}

		/* get a new slot from ring buffer */
		if (pending_slot == -1) {
			slot = shm_get_next_tx_slot(dp->rbctl, skctl->ap_wptr);

			remain_bytes =
				rbctl->tx_skbuf_size
				- sizeof(struct shm_psd_skhdr);
			used_bytes = 0;

			pending_slot = slot;
		}

		consumed_packets++;

		dp->stat.tx_packets[prio]++;
		dp->stat.tx_bytes += packet->len;

		skhdr = (struct shm_psd_skhdr *)
			SHM_PACKET_PTR(rbctl->tx_va,
				slot,
				rbctl->tx_skbuf_size);

		/*
		 * The remaining space is guaranteed to fit this packet.
		 * skhdr->length records the total padded payload packed
		 * into this slot so far.
		 */
		skhdr->length = used_bytes + padded_size(packet->len);
		memcpy((unsigned char *)(skhdr + 1) + used_bytes,
			packet->data, packet->len);

		used_bytes += padded_size(packet->len);
		remain_bytes -= padded_size(packet->len);

		trace_psd_xmit(packet, slot);

		dp->stat.tx_packets_delay[prio] +=
			ktime_to_ns(net_timedelta(skb_get_ktime(packet)));

		dev_kfree_skb_any(packet);
	}

	/* send out the pending slot */
	if (pending_slot != -1) {
		shm_flush_dcache(rbctl, SHM_PACKET_PTR(rbctl->tx_va,
				pending_slot,
				rbctl->tx_skbuf_size),
			used_bytes + sizeof(struct shm_psd_skhdr));
		skctl->ap_wptr = pending_slot;
		pending_slot = -1;
		consumed_slot++;
		dp->stat.tx_slots++;
		dp->stat.tx_free_bytes += remain_bytes;
		dp->stat.tx_used_bytes += used_bytes;
	}

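	/* interrupt CP once for all slots published in this run */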
	if (consumed_slot > 0) {
		trace_psd_xmit_irq(consumed_slot);
		acipc_notify_psd_packet_sent();
		dp->stat.tx_interrupts++;
		dp->stat.tx_sched_q_len += start_q_len;
	}

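	/* used up all TX shots; reschedule to keep draining the queue */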
	if (consumed_slot >= max_tx_shots) {
		data_path_schedule_tx(dp);
		dp->stat.tx_resched_cnt++;
	}

	/*
	 * The ring buffer is stopped; notify the upper layer.
	 * Do not check is_tx_stopped here, as we must handle the case
	 * where a new on-demand PDP is activated after tx_stop has been
	 * called.
	 */
	if (rbctl->is_ap_xmit_stopped) {
		if (!dp->is_tx_stopped)
			pr_err("%s tx stop\n", __func__);

		dp->is_tx_stopped = true;

		/* notify upper layer tx stopped */
		if (dp->cbs->tx_stop)
			dp->cbs->tx_stop();

		/* reschedule tx to poll the ring buffer */
		if (tx_q_length(dp))
			__data_path_schedule_tx(dp, true);
	}

	/*
	 * The ring buffer has been resumed and the remaining packets in
	 * the queue have all been sent out.
	 */
	if (!rbctl->is_ap_xmit_stopped && dp->is_tx_stopped
		&& tx_q_length(dp) == 0) {
		pr_err("%s tx resume\n", __func__);

		/* notify upper layer tx resumed */
		if (dp->cbs->tx_resume)
			dp->cbs->tx_resume();

		dp->is_tx_stopped = false;
	}
}