/*
 * Flush all iobufs accumulated in the BBC free pool: detach the whole
 * chain under the allocator lock, then return it to the baseband with
 * a single queue insertion and IPC notification.
 */
void nvshm_iobuf_bbc_free(struct nvshm_handle *handle)
{
	struct nvshm_iobuf *iob = NULL;
	unsigned long f;

	/* Detach the accumulated chain and reset the pool under the lock */
	spin_lock_irqsave(&alloc.lock, f);
	if (alloc.bbc_pool_head) {
		alloc.free_count = 0;
		iob = alloc.bbc_pool_head;
		alloc.bbc_pool_head = alloc.bbc_pool_tail = NULL;
	}
	spin_unlock_irqrestore(&alloc.lock, f);
	/* Queue outside the lock; one IPC covers the whole chain */
	if (iob) {
		nvshm_queue_put(handle, iob);
		nvshm_generate_ipc(handle);
	}
}
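
/*
 * For illustration only: a minimal sketch of the free path that feeds
 * the pool flushed above. The helper name and BBC_POOL_FLUSH_THRESHOLD
 * are assumptions, not driver API, and the real free path also has to
 * deal with reference counts and AP-owned buffers. The point it
 * mirrors is that pool links are chained as baseband addresses (via
 * NVSHM_A2B()) so the flushed list can be handed to the modem queue
 * as-is.
 */
#define BBC_POOL_FLUSH_THRESHOLD 32	/* hypothetical batch size */

static void nvshm_iobuf_bbc_pool_add(struct nvshm_handle *handle,
				     struct nvshm_iobuf *iob)
{
	unsigned long f;
	int flush;

	spin_lock_irqsave(&alloc.lock, f);
	/* Chain at the tail; links are stored as BB addresses */
	iob->next = NULL;
	if (alloc.bbc_pool_tail)
		alloc.bbc_pool_tail->next = NVSHM_A2B(handle, iob);
	else
		alloc.bbc_pool_head = iob;
	alloc.bbc_pool_tail = iob;
	flush = (++alloc.free_count >= BBC_POOL_FLUSH_THRESHOLD);
	spin_unlock_irqrestore(&alloc.lock, f);

	/* Flush in batches to amortize queue insertions and IPC */
	if (flush)
		nvshm_iobuf_bbc_free(handle);
}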
/*
 * Queue a frame (a chain of iobufs, possibly with scatter/gather
 * leaves) for transmission on the given channel. Returns 0 on success,
 * 1 if the rate limit was hit (the frame is still queued but xoff is
 * raised), or -EINVAL if the channel is not mapped.
 */
int nvshm_write(struct nvshm_channel *handle, struct nvshm_iobuf *iob)
{
	unsigned long f;
	struct nvshm_handle *priv = nvshm_get_handle();
	struct nvshm_iobuf *list, *leaf;
	int count = 0, ret = 0;

	spin_lock_irqsave(&priv->lock, f);
	if (!priv->chan[handle->index].ops) {
		pr_err("%s: channel not mapped\n", __func__);
		spin_unlock_irqrestore(&priv->lock, f);
		return -EINVAL;
	}

	/*
	 * Walk the frame list and each frame's scatter/gather chain,
	 * tagging every iobuf with the channel index and counting
	 * fragments for rate limiting. ->next and ->sg_next hold
	 * baseband addresses, so each link is converted with
	 * NVSHM_B2A() before it is dereferenced.
	 */
	list = iob;
	while (list) {
		count++;
		list->chan = handle->index;
		leaf = list->sg_next;
		while (leaf) {
			count++;
			/* sg_next links are BB pointers: convert first */
			leaf = NVSHM_B2A(priv, leaf);
			leaf->chan = handle->index;
			leaf = leaf->sg_next;
		}
		list = list->next;
		if (list)
			list = NVSHM_B2A(priv, list);
	}
	/*
	 * Charge every fragment against the channel rate counter. Once
	 * it goes negative, raise xoff and report it to the caller.
	 */
	priv->chan[handle->index].rate_counter -= count;
	if (priv->chan[handle->index].rate_counter < 0) {
		priv->chan[handle->index].xoff = 1;
		pr_warn("%s: rate limit hit on chan %d\n", __func__,
			handle->index);
		ret = 1;
	}

	/* Make this frame the queue tail, then enqueue and notify */
	iob->qnext = NULL;
	nvshm_queue_put(priv, iob);
	nvshm_generate_ipc(priv);
	spin_unlock_irqrestore(&priv->lock, f);
	return ret;
}
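
/*
 * Hedged sketch of the address conversion nvshm_write() relies on.
 * The shared-memory window is mapped at different base addresses on
 * the AP and the baseband, so every pointer crossing the interface is
 * rebased by a constant offset before use. The names below
 * (ipc_base_virt, NVSHM_IPC_BB_BASE) and the sketch macros are
 * assumptions; the driver's real NVSHM_B2A()/NVSHM_A2B() live in its
 * private header.
 */
#define NVSHM_IPC_BB_BASE	0x8c000000UL	/* hypothetical BB-side base */
#define NVSHM_B2A_SKETCH(h, x) \
	((void *)((unsigned long)(x) - NVSHM_IPC_BB_BASE + \
		  (unsigned long)(h)->ipc_base_virt))
#define NVSHM_A2B_SKETCH(h, x) \
	((void *)((unsigned long)(x) + NVSHM_IPC_BB_BASE - \
		  (unsigned long)(h)->ipc_base_virt))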
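
/*
 * For illustration only: a hedged sketch of a caller honoring the
 * flow-control contract of nvshm_write(). The helper and the
 * tx_blocked flag are hypothetical; only the return-value handling
 * mirrors the function as written. A real client would clear its xoff
 * state when the driver signals xon through the channel ops.
 */
static bool tx_blocked;	/* hypothetical xoff state for one channel */

static int nvshm_example_send(struct nvshm_channel *chan,
			      struct nvshm_iobuf *iob)
{
	int ret;

	if (tx_blocked)
		return -EAGAIN;	/* wait for xon before sending more */

	ret = nvshm_write(chan, iob);
	if (ret < 0)
		return ret;	/* -EINVAL: channel not mapped */
	if (ret == 1)
		tx_blocked = true;	/* frame queued, but xoff raised */
	return 0;
}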