int nvshm_iobuf_unref_cluster(struct nvshm_iobuf *iob)
{
	int ref, ret = 0;
	struct nvshm_iobuf *_phy_list, *_phy_leaf;
	struct nvshm_handle *handle = nvshm_get_handle();

	_phy_list = iob;
	while (_phy_list) {
		_phy_leaf = _phy_list;
		while (_phy_leaf) {
			ref = nvshm_iobuf_unref(_phy_leaf);
			ret = (ref > ret) ? ref : ret;
			if (_phy_leaf->sg_next) {
				_phy_leaf = NVSHM_B2A(handle,
						      _phy_leaf->sg_next);
			} else {
				_phy_leaf = NULL;
			}
		}
		if (_phy_list->next)
			_phy_list = NVSHM_B2A(handle, _phy_list->next);
		else
			_phy_list = NULL;
	}

	return ret;
}
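/*
 * Usage sketch (hypothetical caller; rx_done_cb is illustrative and not
 * part of this driver): drop the AP reference on every leaf of a
 * cluster in one call. The value returned above is the highest refcount
 * still held on any leaf, so 0 suggests the whole cluster went back to
 * the free pool.
 */
static void rx_done_cb(struct nvshm_iobuf *cluster)
{
	if (nvshm_iobuf_unref_cluster(cluster) == 0)
		pr_debug("%s: cluster fully released\n", __func__);
}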
/* Flush cache lines associated with iobuf list */
static void flush_iob_list(struct nvshm_handle *handle, struct nvshm_iobuf *iob)
{
	struct nvshm_iobuf *phy_list, *leaf, *next, *sg_next;

	phy_list = iob;
	while (phy_list) {
		leaf = phy_list;
		next = phy_list->next;
		while (leaf) {
			sg_next = leaf->sg_next;
			WARN_ON_ONCE(nvshm_iobuf_check(leaf) < 0);
			/* Flush associated data */
			if (leaf->length) {
				FLUSH_CPU_DCACHE(NVSHM_B2A(handle,
							   (int)leaf->npdu_data
							   + leaf->data_offset),
						 leaf->length);
			}
			/* Flush iobuf */
			FLUSH_CPU_DCACHE(leaf, sizeof(struct nvshm_iobuf));
			if (sg_next)
				leaf = NVSHM_B2A(handle, sg_next);
			else
				leaf = NULL;
		}
		if (next)
			phy_list = NVSHM_B2A(handle, next);
		else
			phy_list = NULL;
	}
}
struct nvshm_iobuf *nvshm_iobuf_alloc(struct nvshm_channel *chan, int size)
{
	struct nvshm_handle *handle = nvshm_get_handle();
	struct nvshm_iobuf *desc = NULL;
	unsigned long f;

	spin_lock_irqsave(&alloc.lock, f);
	if (alloc.free_pool_head) {
		int check = nvshm_iobuf_check(alloc.free_pool_head);

		if (check) {
			spin_unlock_irqrestore(&alloc.lock, f);
			pr_err("%s: iobuf check ret %d\n", __func__, check);
			return NULL;
		}
		if (size > (alloc.free_pool_head->total_length -
			    NVSHM_DEFAULT_OFFSET)) {
			spin_unlock_irqrestore(&alloc.lock, f);
			pr_err("%s: requested size (%d > %d) too big\n",
			       __func__,
			       size,
			       alloc.free_pool_head->total_length -
			       NVSHM_DEFAULT_OFFSET);
			if (chan->ops) {
				chan->ops->error_event(chan,
						       NVSHM_IOBUF_ERROR);
			}
			return desc;
		}
		desc = alloc.free_pool_head;
		alloc.free_pool_head = desc->next;
		if (alloc.free_pool_head) {
			alloc.free_pool_head = NVSHM_B2A(handle,
							 alloc.free_pool_head);
		} else {
			pr_debug("%s end of alloc queue - clearing tail\n",
				__func__);
			alloc.free_pool_tail = NULL;
		}
		desc->length = 0;
		desc->flags = 0;
		desc->data_offset = NVSHM_DEFAULT_OFFSET;
		desc->sg_next = NULL;
		desc->next = NULL;
		desc->ref = 1;

	} else {
		spin_unlock_irqrestore(&alloc.lock, f);
		pr_err("%s: no more alloc space\n", __func__);
		/* No error since it's only an Xoff situation */
		return desc;
	}

	spin_unlock_irqrestore(&alloc.lock, f);

	return desc;
}
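/*
 * Usage sketch (hypothetical; send_buf() is illustrative): allocate an
 * iobuf on a channel, copy a payload at its data offset and hand it to
 * nvshm_write(). The payload address computation mirrors the one used
 * by flush_iob_list()/inv_iob_list() in this file.
 */
static int send_buf(struct nvshm_channel *chan, const void *buf, int len)
{
	struct nvshm_handle *handle = nvshm_get_handle();
	struct nvshm_iobuf *iob = nvshm_iobuf_alloc(chan, len);

	if (!iob)
		return -ENOMEM;	/* pool exhausted (Xoff) or size too big */

	memcpy(NVSHM_B2A(handle, (int)iob->npdu_data + iob->data_offset),
	       buf, len);
	iob->length = len;
	return nvshm_write(chan, iob);
}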
int nvshm_write(struct nvshm_channel *handle, struct nvshm_iobuf *iob)
{
	unsigned long f;
	struct nvshm_handle *priv = nvshm_get_handle();
	struct nvshm_iobuf *list, *leaf;
	int count = 0, ret = 0;

	spin_lock_irqsave(&priv->lock, f);
	if (!priv->chan[handle->index].ops) {
		pr_err("%s: channel not mapped\n", __func__);
		spin_unlock_irqrestore(&priv->lock, f);
		return -EINVAL;
	}

	list = iob;
	while (list) {
		count++;
		list->chan = handle->index;
		leaf = list->sg_next;
		while (leaf) {
			count++;
			leaf = NVSHM_B2A(priv, leaf);
			leaf->chan = handle->index;
			leaf = leaf->sg_next;
		}
		list = list->next;
		if (list)
			list = NVSHM_B2A(priv, list);
	}
	priv->chan[handle->index].rate_counter -= count;
	if (priv->chan[handle->index].rate_counter < 0) {
		priv->chan[handle->index].xoff = 1;
		pr_warn("%s: rate limit hit on chan %d\n", __func__,
			handle->index);
		ret = 1;
	}

	iob->qnext = NULL;
	nvshm_queue_put(priv, iob);
	nvshm_generate_ipc(priv);
	spin_unlock_irqrestore(&priv->lock, f);
	return ret;
}
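/*
 * Flow-control sketch (hypothetical caller; tx_paused is not a field of
 * this driver): nvshm_write() returns 1 when the rate limit trips and
 * the channel enters Xoff. The iobuf is still queued in that case; the
 * caller should simply stop submitting until start_tx() is invoked from
 * nvshm_iobuf_process_freed() below.
 */
static int submit_tx(struct nvshm_channel *chan, struct nvshm_iobuf *iob,
		     int *tx_paused)
{
	int ret = nvshm_write(chan, iob);

	if (ret == 1)
		*tx_paused = 1;	/* resume from the start_tx() callback */
	return ret < 0 ? ret : 0;
}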
/* Returned iobufs are already freed - just process them */
void nvshm_iobuf_process_freed(struct nvshm_iobuf *desc)
{
	struct nvshm_handle *priv = nvshm_get_handle();
	unsigned long f;

	while (desc) {
		int callback = 0, chan;
		struct nvshm_iobuf *next = desc->next;

		if (desc->ref != 0) {
			pr_err("%s: BBC returned an non freed iobuf (0x%x)\n",
			       __func__,
			       (unsigned int)desc);
			return;
		}

		chan = desc->chan;
		spin_lock_irqsave(&alloc.lock, f);
		/* update rate counter */
		if ((chan >= 0) &&
		    (chan < priv->chan_count)) {
			if ((priv->chan[chan].rate_counter++ ==
			     NVSHM_RATE_LIMIT_TRESHOLD)
			    && (priv->chan[chan].xoff)) {
				priv->chan[chan].xoff = 0;
				callback = 1;
			}
		}
		desc->sg_next = NULL;
		desc->next = NULL;
		desc->length = 0;
		desc->flags = 0;
		desc->data_offset = 0;
		desc->chan = 0;
		if (alloc.free_pool_tail) {
			alloc.free_pool_tail->next = NVSHM_A2B(priv, desc);
			alloc.free_pool_tail = desc;
		} else {
			alloc.free_pool_head = desc;
			alloc.free_pool_tail = desc;
		}
		spin_unlock_irqrestore(&alloc.lock, f);
		if (callback)
			nvshm_start_tx(&priv->chan[chan]);
		if (next) {
			desc = NVSHM_B2A(priv, next);
		} else {
			desc = next;
		}
	}
}
/*
 * Invalidate cache lines for an iobuf list; receive-side counterpart of
 * flush_iob_list() above. Return 0 if OK, non-zero otherwise.
 */
static int inv_iob_list(struct nvshm_handle *handle, struct nvshm_iobuf *iob)
{
	struct nvshm_iobuf *phy_list, *leaf;

	phy_list = iob;
	while (phy_list) {
		leaf = phy_list;
		while (leaf) {
			/*
			 * Check the leaf address before any operation on it.
			 * nvshm_iobuf_check() cannot be used here because the
			 * iobuf has not been invalidated yet, so its content
			 * could be stale.
			 */
			if (ADDR_OUTSIDE(leaf, handle->ipc_base_virt,
					handle->ipc_size)) {
				return -EIO;
			}
			/* Invalidate iobuf */
			INV_CPU_DCACHE(leaf, sizeof(struct nvshm_iobuf));
			/* Check iobuf */
			if (nvshm_iobuf_check(leaf))
				return -EIO;
			/* Invalidate associated data */
			if (leaf->length) {
				INV_CPU_DCACHE(NVSHM_B2A(handle,
							 (int)leaf->npdu_data
							 + leaf->data_offset),
					       leaf->length);
			}
			if (leaf->sg_next)
				leaf = NVSHM_B2A(handle, leaf->sg_next);
			else
				leaf = NULL;
		}
		if (phy_list->next)
			phy_list = NVSHM_B2A(handle, phy_list->next);
		else
			phy_list = NULL;
	}
	return 0;
}
void nvshm_iobuf_free_cluster(struct nvshm_iobuf *list)
{
	struct nvshm_handle *priv = nvshm_get_handle();
	struct nvshm_iobuf *_phy_list, *_to_free, *leaf;
	int n = 0;

	_phy_list = list;
	while (_phy_list) {
		_to_free = list;
		if (list->sg_next) {
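			/* Promote the next sg leaf to row head: it inherits
			 * this row's ->next so the rest of the sg chain is
			 * freed on later passes. */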
			_phy_list = list->sg_next;
			if (_phy_list) {
				leaf = NVSHM_B2A(priv, _phy_list);
				leaf->next = list->next;
			}
		} else {
			_phy_list = list->next;
		}
		list = NVSHM_B2A(priv, _phy_list);
		n++;
		nvshm_iobuf_free(_to_free);
	}
}
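/*
 * Dequeue one iobuf cluster from the shared queue. The queue keeps the
 * last consumed element as a "dummy" head: the descriptor returned here
 * becomes the new dummy and the previous one is freed before returning.
 */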
struct nvshm_iobuf *nvshm_queue_get(struct nvshm_handle *handle)
{
	struct nvshm_iobuf *dummy, *ret;

	if (!handle->shared_queue_head) {
		pr_err("%s: Queue not init!\n", __func__);
		return NULL;
	}

	dummy = handle->shared_queue_head;
	/* Invalidate lower part of iobuf - upper part can be written by AP */

	INV_CPU_DCACHE(&dummy->qnext,
		       sizeof(struct nvshm_iobuf) / 2);
	if (dummy->qnext == NULL)
		return NULL;

	ret = NVSHM_B2A(handle, dummy->qnext);

	/* Invalidate iobuf(s) and check validity */
	handle->errno = inv_iob_list(handle, ret);

	if (handle->errno) {
		pr_err("%s: queue corruption\n", __func__);
		return NULL;
	}

	handle->shared_queue_head = ret;

	/* Update queue_bb_offset for debug purpose */
	handle->conf->queue_bb_offset = (int)ret
		- (int)handle->ipc_base_virt;

	if ((handle->conf->queue_bb_offset < 0) ||
	    (handle->conf->queue_bb_offset > handle->conf->shmem_size))
		pr_err("%s: out of bound descriptor offset %d addr 0x%p/0x%p\n",
		       __func__,
		       handle->conf->queue_bb_offset,
		       ret,
		       NVSHM_A2B(handle, ret));

	pr_debug("%s (%p)->%p->(%p)\n", __func__,
		 dummy, ret, ret->qnext);

	dummy->qnext = NULL;
	nvshm_iobuf_free(dummy);

	return ret;
}
static const char *give_pointer_location(struct nvshm_handle *handle, void *ptr)
{
	if (!ptr)
		return "null";

	ptr = NVSHM_B2A(handle, ptr);

	if (ADDR_OUTSIDE(ptr, handle->desc_base_virt, handle->desc_size)
	    && ADDR_OUTSIDE(ptr, handle->data_base_virt, handle->data_size)) {
		if (ADDR_OUTSIDE(ptr, handle->ipc_base_virt, handle->ipc_size))
			return "Err";
		else
			return "BBC";
	}

	return "AP";
}
/*
 * Called from IPC workqueue
 */
void nvshm_process_queue(struct nvshm_handle *handle)
{
	unsigned long f;
	struct nvshm_iobuf *iob;
	struct nvshm_if_operations *ops;
	int chan;

	spin_lock_irqsave(&handle->lock, f);
	iob = nvshm_queue_get(handle);
	while (iob) {
		pr_debug("%s %p/%d/%d/%d->%p\n", __func__,
			iob, iob->chan, iob->length, iob->ref, iob->next);
		tegra_bb_clear_ipc(handle->tegra_bb);
		chan = iob->chan;
		if (iob->pool_id < NVSHM_AP_POOL_ID) {
			ops = handle->chan[chan].ops;
			if (ops) {
				spin_unlock_irqrestore(&handle->lock, f);
				ops->rx_event(&handle->chan[chan], iob);
				spin_lock_irqsave(&handle->lock, f);
			} else {
				nvshm_iobuf_free_cluster(iob);
			}
		} else {
			/*
			 * Freed iobufs can form a tree: process the attached
			 * iobufs, but do not touch iob itself as it will be
			 * freed by the next queue_get().
			 */
			if (iob->next) {
				nvshm_iobuf_process_freed(
					NVSHM_B2A(handle, iob->next));
			}
		}
		iob = nvshm_queue_get(handle);
	}
	spin_unlock_irqrestore(&handle->lock, f);
	/* Finalize BBC free */
	nvshm_iobuf_bbc_free(handle);
}
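/*
 * Interface sketch (hypothetical implementation; only the signature of
 * rx_event() is inferred from the call in nvshm_process_queue() above):
 * a channel user receives clusters through rx_event() and owns them
 * from that point on, so it must release them once consumed, e.g. with
 * nvshm_iobuf_free_cluster().
 */
static void demo_rx_event(struct nvshm_channel *chan,
			  struct nvshm_iobuf *iob)
{
	/* consume iob->length bytes here, then release the cluster */
	nvshm_iobuf_free_cluster(iob);
}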