void nvshm_iobuf_dump(struct nvshm_iobuf *iob)
{
	struct nvshm_handle *priv = nvshm_get_handle();

	pr_err("iobuf (0x%p) dump:\n", NVSHM_A2B(priv, iob));
	pr_err("\t data      = 0x%p (%s)\n", iob->npduData,
	       give_pointer_location(priv, iob->npduData));
	pr_err("\t length    = %d\n", iob->length);
	pr_err("\t offset    = %d\n", iob->dataOffset);
	pr_err("\t total_len = %d\n", iob->totalLength);
	pr_err("\t ref       = %d\n", iob->ref);
	pr_err("\t pool_id   = %d (%s)\n", iob->pool_id,
	       (iob->pool_id < NVSHM_AP_POOL_ID) ? "BBC" : "AP");
	pr_err("\t next      = 0x%p (%s)\n", iob->next,
	       give_pointer_location(priv, iob->next));
	pr_err("\t sg_next   = 0x%p (%s)\n", iob->sg_next,
	       give_pointer_location(priv, iob->sg_next));
	pr_err("\t flags     = 0x%x\n", iob->flags);
	pr_err("\t _size     = %d\n", iob->_size);
	pr_err("\t _handle   = 0x%p\n", iob->_handle);
	pr_err("\t _reserved = 0x%x\n", iob->_reserved);
	pr_err("\t qnext     = 0x%p (%s)\n", iob->qnext,
	       give_pointer_location(priv, iob->qnext));
	pr_err("\t chan      = 0x%x\n", iob->chan);
	pr_err("\t qflags    = 0x%x\n", iob->qflags);
}
/** Single iobuf free - do not follow iobuf links */
void nvshm_iobuf_free(struct nvshm_iobuf *desc)
{
	struct nvshm_handle *priv = nvshm_get_handle();
	int callback = 0, chan;
	unsigned long f;

	if (desc->ref == 0) {
		pr_err("%s: freeing an already freed iobuf (0x%x)\n",
		       __func__,
		       (unsigned int)desc);
		return;
	}
	spin_lock_irqsave(&alloc.lock, f);
	pr_debug("%s: free 0x%p ref %d pool %x\n", __func__,
		 desc, desc->ref, desc->pool_id);
	desc->ref--;
	chan = desc->chan;
	if (desc->ref == 0) {
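		/*
		 * Last reference dropped: AP-owned iobufs go back to the
		 * local free pool, BBC-owned ones are handed back to the
		 * modem through bbc_free().
		 */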
		if (desc->pool_id >= NVSHM_AP_POOL_ID) {
			/* update rate counter */
			if ((chan >= 0) &&
			    (chan < priv->chan_count)) {
				if ((priv->chan[chan].rate_counter++ ==
				     NVSHM_RATE_LIMIT_TRESHOLD)
				    && (priv->chan[chan].xoff)) {
					priv->chan[chan].xoff = 0;
					callback = 1;
				}
			}
			desc->sg_next = NULL;
			desc->next = NULL;
			desc->length = 0;
			desc->flags = 0;
			desc->dataOffset = 0;
			desc->chan = 0;
			if (alloc.free_pool_tail) {
				alloc.free_pool_tail->next = NVSHM_A2B(priv,
								       desc);
				alloc.free_pool_tail = desc;
			} else {
				alloc.free_pool_head = desc;
				alloc.free_pool_tail = desc;
			}
		} else {
			/* iobuf belongs to other side */
			pr_debug("%s: re-queue freed buffer\n", __func__);
			desc->sg_next = NULL;
			desc->next = NULL;
			desc->length = 0;
			desc->dataOffset = 0;
			spin_unlock_irqrestore(&alloc.lock, f);
			bbc_free(priv, desc);
			return;
		}
	}
	spin_unlock_irqrestore(&alloc.lock, f);
	if (callback)
		nvshm_start_tx(&priv->chan[chan]);
}
/** Returned iobufs are already freed - just put them back in the free pool */
void nvshm_iobuf_process_freed(struct nvshm_iobuf *desc)
{
	struct nvshm_handle *priv = nvshm_get_handle();
	unsigned long f;

	while (desc) {
		int callback = 0, chan;
		struct nvshm_iobuf *next = desc->next;

		if (desc->ref != 0) {
			pr_err("%s: BBC returned an non freed iobuf (0x%x)\n",
			       __func__,
			       (unsigned int)desc);
			return;
		}

		chan = desc->chan;
		spin_lock_irqsave(&alloc.lock, f);
		/* update rate counter */
		if ((chan >= 0) &&
		    (chan < priv->chan_count)) {
			if ((priv->chan[chan].rate_counter++ ==
			     NVSHM_RATE_LIMIT_TRESHOLD)
			    && (priv->chan[chan].xoff)) {
				priv->chan[chan].xoff = 0;
				callback = 1;
			}
		}
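		/* Reset descriptor and append it to the AP free pool */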
		desc->sg_next = NULL;
		desc->next = NULL;
		desc->length = 0;
		desc->flags = 0;
		desc->dataOffset = 0;
		desc->chan = 0;
		if (alloc.free_pool_tail) {
			alloc.free_pool_tail->next = NVSHM_A2B(priv,
							       desc);
			alloc.free_pool_tail = desc;
		} else {
			alloc.free_pool_head = desc;
			alloc.free_pool_tail = desc;
		}
		spin_unlock_irqrestore(&alloc.lock, f);
		if (callback)
			nvshm_start_tx(&priv->chan[chan]);
		if (next) {
			desc = NVSHM_B2A(priv, next);
		} else {
			desc = next;
		}
	}
}
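/** Dequeue next iobuf from the shared queue, freeing the old guard element */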
struct nvshm_iobuf *nvshm_queue_get(struct nvshm_handle *handle)
{
	struct nvshm_iobuf *dummy, *ret;

	if (!handle->shared_queue_head) {
		pr_err("%s: Queue not init!\n", __func__);
		return NULL;
	}

	dummy = handle->shared_queue_head;
	/* Invalidate lower part of iobuf - upper part can be written by AP */
	INV_CPU_DCACHE(&dummy->qnext,
		       sizeof(struct nvshm_iobuf) / 2);

	if (dummy->qnext == NULL)
		return NULL;

	ret = NVSHM_B2A(handle, dummy->qnext);

	/* Invalidate iobuf(s) and check validity */
	handle->errno = inv_iob_list(handle, ret);

	if (handle->errno) {
		pr_err("%s: queue corruption\n", __func__);
		return NULL;
	}

	handle->shared_queue_head = ret;

	/* Update queue_bb_offset for debug purpose */
	handle->conf->queue_bb_offset = (long)ret
		- (long)handle->ipc_base_virt;

	if ((handle->conf->queue_bb_offset < 0) ||
	    (handle->conf->queue_bb_offset > handle->conf->shmem_size))
		pr_err("%s: out of bound descriptor offset %d addr 0x%p/0x%p\n",
		       __func__,
		       handle->conf->queue_bb_offset,
		       ret,
		       NVSHM_A2B(handle, ret));

	pr_debug("%s (%p)->%p->(%p)\n", __func__,
		 dummy, ret, ret->qnext);

	dummy->qnext = NULL;
	nvshm_iobuf_free(dummy);

	return ret;
}
/* Queue BBC-owned iobufs locally and return them in batches once
 * NVSHM_MAX_FREE_PENDING are pending (see nvshm_iobuf_bbc_free).
 * This saves a lot of CPU/memory cycles on both sides.
 */
static void bbc_free(struct nvshm_handle *handle, struct nvshm_iobuf *iob)
{
	unsigned long f;

	spin_lock_irqsave(&alloc.lock, f);
	alloc.free_count++;
	if (alloc.bbc_pool_head) {
		alloc.bbc_pool_tail->next = NVSHM_A2B(handle, iob);
		alloc.bbc_pool_tail = iob;
	} else {
		alloc.bbc_pool_head = alloc.bbc_pool_tail = iob;
	}
	spin_unlock_irqrestore(&alloc.lock, f);
	if (alloc.free_count > NVSHM_MAX_FREE_PENDING)
		nvshm_iobuf_bbc_free(handle);
}
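/** Append iobuf list to the shared queue tail and flush it to shared memory */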
int nvshm_queue_put(struct nvshm_handle *handle, struct nvshm_iobuf *iob)
{
	unsigned long f;

	spin_lock_irqsave(&handle->qlock, f);
	if (!handle->shared_queue_tail) {
		spin_unlock_irqrestore(&handle->qlock, f);
		pr_err("%s: Queue not init!\n", __func__);
		return -EINVAL;
	}

	if (!iob) {
		pr_err("%s: Queueing null pointer!\n", __func__);
		spin_unlock_irqrestore(&handle->qlock, f);
		return -EINVAL;
	}

	/* Sanity check */
	if (handle->shared_queue_tail->qnext) {
		pr_err("%s: illegal queue pointer detected!\n", __func__);
		spin_unlock_irqrestore(&handle->qlock, f);
		return -EINVAL;
	}

	pr_debug("%s (%p)->%p/%d/%d->%p\n", __func__,
		handle->shared_queue_tail,
		iob, iob->chan, iob->length,
		iob->next);

	/* Take a reference on queued iobuf */
	nvshm_iobuf_ref(iob);
	/* Flush iobuf(s) in cache */
	flush_iob_list(handle, iob);
	handle->shared_queue_tail->qnext = NVSHM_A2B(handle, iob);
	/* Flush guard element from cache */
	FLUSH_CPU_DCACHE(handle->shared_queue_tail,
			 sizeof(struct nvshm_iobuf));
	handle->shared_queue_tail = iob;

	spin_unlock_irqrestore(&handle->qlock, f);
	return 0;
}
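/** Carve descriptor region into iobufs and build the initial AP free pool */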
int nvshm_iobuf_init(struct nvshm_handle *handle)
{
	struct nvshm_iobuf *iob;
	int ndesc, desc, datasize;
	unsigned char *dataptr;

	pr_debug("%s instance %d\n", __func__, handle->instance);

	spin_lock_init(&alloc.lock);
	/* Clear BBC free list */
	alloc.bbc_pool_head = alloc.bbc_pool_tail = NULL;
	alloc.free_count = 0;
	ndesc = handle->desc_size / sizeof(struct nvshm_iobuf);
	alloc.nbuf = ndesc;
	datasize = handle->data_size / ndesc;
	spin_lock(&alloc.lock);
	if (handle->shared_queue_tail != handle->desc_base_virt) {
		pr_err("%s initial tail != desc_base_virt not supported yet\n",
		       __func__);
	}
	iob = (struct nvshm_iobuf *)handle->desc_base_virt;

	dataptr = handle->data_base_virt;
	/* Invalidate all data region */
	INV_CPU_DCACHE(dataptr, handle->data_size);
	/* Clear all desc region */
	memset(handle->desc_base_virt, 0, handle->desc_size);
	/* Dummy queue element */
	iob->npduData = NVSHM_A2B(handle, dataptr);
	dataptr += datasize;
	iob->dataOffset = NVSHM_DEFAULT_OFFSET;
	iob->totalLength = datasize;
	iob->chan = -1;
	iob->next = NULL;
	iob->pool_id = NVSHM_AP_POOL_ID;
	iob->ref = 1;
	alloc.free_pool_head = ++iob;
	for (desc = 1; desc < (ndesc-1); desc++) {
		iob->npduData = NVSHM_A2B(handle, dataptr);
		dataptr += datasize;
		iob->dataOffset = NVSHM_DEFAULT_OFFSET;
		iob->totalLength = datasize;
		iob->next = NVSHM_A2B(handle, (void *)iob +
				      sizeof(struct nvshm_iobuf));
		iob->pool_id = NVSHM_AP_POOL_ID;
		iob++;
	}
	/* Untied last */
	iob->npduData = NVSHM_A2B(handle, dataptr);
	iob->dataOffset = NVSHM_DEFAULT_OFFSET;
	iob->totalLength = datasize;
	iob->pool_id = NVSHM_AP_POOL_ID;
	iob->next = NULL;

	alloc.free_pool_tail = iob;
	/* Flush all descriptor region */
	FLUSH_CPU_DCACHE(handle->desc_base_virt,
			 (long)handle->desc_size);
	spin_unlock(&alloc.lock);
	return 0;
}
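/** Check that an iobuf and all its pointers lie in the IPC window - 0 if OK */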
int nvshm_iobuf_check(struct nvshm_iobuf *iob)
{
	struct nvshm_handle *priv = nvshm_get_handle();
	struct nvshm_iobuf *bbiob;
	int ret = 0;

	/* Check iobuf is in IPC space */
	if (ADDR_OUTSIDE(iob, priv->ipc_base_virt, priv->ipc_size)) {
		pr_err("%s: iob @ check failed 0x%lx\n",
		       __func__,
		       (long)iob);
		return -1;
	}

	bbiob = NVSHM_A2B(priv, iob);

	if (ADDR_OUTSIDE(iob->npduData, NVSHM_IPC_BB_BASE, priv->ipc_size)) {
		pr_err("%s 0x%lx: npduData @ check failed 0x%lx\n",
		       __func__,
		       (long)bbiob,
		       (long)iob->npduData);
		ret = -2;
		goto dump;
	}
	if (ADDR_OUTSIDE(iob->npduData + iob->dataOffset,
			NVSHM_IPC_BB_BASE, priv->ipc_size)) {
		pr_err("%s 0x%lx: npduData + offset @ check failed 0x%lx/0x%lx\n",
		       __func__, (long)bbiob,
		       (long)iob->npduData, (long)iob->dataOffset);
		ret = -3;
		goto dump;
	}
	if (iob->next) {
		if (ADDR_OUTSIDE(iob->next,
				NVSHM_IPC_BB_BASE, priv->ipc_size)) {
			pr_err("%s 0x%lx: next @ check failed 0x%lx\n",
			       __func__,
			       (long)bbiob,
			       (long)iob->next);
			ret = -4;
			goto dump;
		}
	}
	if (iob->sg_next) {
		if (ADDR_OUTSIDE(iob->sg_next,
				NVSHM_IPC_BB_BASE, priv->ipc_size)) {
			pr_err("%s 0x%lx:sg_next @ check failed 0x%lx\n",
			       __func__, (long)bbiob, (long)iob->sg_next);
			ret = -5;
			goto dump;
		}
	}
	if (iob->qnext) {
		if (ADDR_OUTSIDE(iob->qnext,
				NVSHM_IPC_BB_BASE, priv->ipc_size)) {
			pr_err("%s 0x%lx:qnext @ check failed 0x%lx\n",
			       __func__, (long)bbiob, (long)iob->qnext);
			ret = -6;
			goto dump;
		}
	}

	return ret;
dump:
	nvshm_iobuf_dump(iob);
	return ret;
}