Example #1
0
/* doesn't need to be called inside nvmap_pin_lock, since this will only
 * expand the available VM area */
static int handle_unpin(struct nvmap_client *client, struct nvmap_handle *h)
{
	int ret = 0;

	nvmap_mru_lock(client->share);

	if (atomic_read(&h->pin) == 0) {
		nvmap_err(client, "%s unpinning unpinned handle %p\n",
			  current->group_leader->comm, h);
		nvmap_mru_unlock(client->share);
		return 0;
	}

	BUG_ON(!h->alloc);

	if (!atomic_dec_return(&h->pin)) {
		if (h->heap_pgalloc && h->pgalloc.area) {
			/* if a secure handle is clean (i.e., mapped into
			 * IOVMM), it needs to be zapped on unpin. */
			if (h->secure && !h->pgalloc.dirty) {
				tegra_iovmm_zap_vm(h->pgalloc.area);
				h->pgalloc.dirty = true;
			}
			nvmap_mru_insert_locked(client->share, h);
			ret = 1;
		}
	}

	nvmap_mru_unlock(client->share);

	nvmap_handle_put(h);
	return ret;
}
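
Stripped of the driver specifics, Example #1 is a dec-and-test on an atomic pin count, where only the 1 -> 0 transition does the recycling work, all under the MRU lock. Below is a minimal userspace sketch of that pattern; the names (buf_handle, buf_unpin, recycle_lock) are hypothetical, and C11 atomics plus a pthread mutex stand in for the kernel's atomic_t and nvmap_mru_lock().

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for nvmap_handle: a pin count plus a mapping flag. */
struct buf_handle {
	atomic_int pin;
	bool mapped;
};

static pthread_mutex_t recycle_lock = PTHREAD_MUTEX_INITIALIZER;

/* Same shape as handle_unpin(): refuse to drop below zero, and only the
 * 1 -> 0 transition releases the mapping, under recycle_lock. */
static int buf_unpin(struct buf_handle *h)
{
	int ret = 0;

	pthread_mutex_lock(&recycle_lock);
	if (atomic_load(&h->pin) == 0) {
		fprintf(stderr, "unpinning an unpinned handle %p\n", (void *)h);
		pthread_mutex_unlock(&recycle_lock);
		return 0;
	}
	if (atomic_fetch_sub(&h->pin, 1) == 1 && h->mapped) {
		h->mapped = false;	/* last pin gone: mapping can be recycled */
		ret = 1;
	}
	pthread_mutex_unlock(&recycle_lock);
	return ret;
}

int main(void)
{
	struct buf_handle h = { .pin = 1, .mapped = true };

	printf("last unpin returned %d\n", buf_unpin(&h));
	return 0;
}

The early return on a zero count plays the same role as the "unpinning unpinned handle" guard above: it logs the misuse instead of letting the counter go negative.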
Example #2
0
static int pin_array_locked(struct nvmap_client *client,
		struct nvmap_handle **h, int count)
{
	int pinned;
	int i;
	int err = 0;

	/* Flush deferred cache maintenance if needed */
	for (pinned = 0; pinned < count; pinned++)
		if (nvmap_find_cache_maint_op(client->dev, h[pinned]))
			nvmap_cache_maint_ops_flush(client->dev, h[pinned]);

	nvmap_mru_lock(client->share);
	for (pinned = 0; pinned < count; pinned++) {
		err = pin_locked(client, h[pinned]);
		if (err)
			break;
	}
	nvmap_mru_unlock(client->share);

	if (err) {
		/* unpin pinned handles */
		for (i = 0; i < pinned; i++) {
			/* inc ref counter, because
			 * handle_unpin decrements it */
			nvmap_handle_get(h[i]);
			/* unpin handles and free vm */
			handle_unpin(client, h[i], true);
		}
	}

	if (err && tegra_iovmm_get_max_free(client->share->iovmm) >=
							client->iovm_limit) {
		/* The first attempt to pin into an empty iovmm may still
		 * fail because of fragmentation caused by handles placed
		 * in MRU areas. After such a failure, the whole MRU list
		 * is cleaned up and its iovm space is freed.
		 *
		 * We have to retry the pinning here, since there may be
		 * no more incoming pin_wait wakeups from unpin
		 * operations. */
		nvmap_mru_lock(client->share);
		for (pinned = 0; pinned < count; pinned++) {
			err = pin_locked(client, h[pinned]);
			if (err)
				break;
		}
		nvmap_mru_unlock(client->share);

		if (err) {
			pr_err("Pinning in empty iovmm failed!\n");
			BUG();
		}
	}
	return err;
}
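
The control flow of pin_array_locked() is an all-or-nothing acquire with rollback and a single retry after space has been reclaimed. The sketch below isolates that flow with hypothetical names (acquire_all, try_acquire, release, reclaim_space, NSLOTS); the stub helpers only simulate a fixed pool and are not part of nvmap.

#include <stdio.h>

/* Hypothetical stand-ins: a fixed pool of slots playing the role of IOVM
 * space. try_acquire() returns 0 on success, release() undoes it. */
#define NSLOTS 4
static int slots_used;

static int try_acquire(int id)
{
	if (slots_used >= NSLOTS)
		return -1;	/* "out of IOVM space" */
	slots_used++;
	return 0;
}

static void release(int id)
{
	slots_used--;
}

static void reclaim_space(void)
{
	/* in nvmap this is the MRU cleanup done via handle_unpin(.., true) */
}

/* Acquire all `count` resources or none, mirroring the pin -> rollback ->
 * reclaim -> re-pin sequence of pin_array_locked(). */
static int acquire_all(const int *ids, int count)
{
	int i, n, err = 0;

	for (n = 0; n < count; n++) {
		err = try_acquire(ids[n]);
		if (err)
			break;
	}
	if (!err)
		return 0;

	for (i = 0; i < n; i++)		/* roll back what succeeded */
		release(ids[i]);

	reclaim_space();		/* then retry exactly once */
	for (n = 0; n < count; n++) {
		err = try_acquire(ids[n]);
		if (err)
			break;
	}
	if (err)
		for (i = 0; i < n; i++)
			release(ids[i]);
	return err;
}

int main(void)
{
	int ids[] = { 1, 2, 3 };

	printf("acquire_all: %d\n", acquire_all(ids, 3));
	return 0;
}

Unlike the driver, which treats a failure of the second attempt as fatal (BUG()), the sketch simply rolls back again and reports the error to the caller.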
Example #3
0
void nvmap_mru_remove(struct nvmap_share *s, struct nvmap_handle *h)
{
	nvmap_mru_lock(s);
	if (!list_empty(&h->pgalloc.mru_list))
		list_del(&h->pgalloc.mru_list);
	nvmap_mru_unlock(s);
	INIT_LIST_HEAD(&h->pgalloc.mru_list);
}
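
nvmap_mru_remove() relies on the list.h convention of re-initializing a node after unlinking it, so that list_empty() on the node itself answers "is this handle currently on any list?". The following is a self-contained userspace sketch of that idiom; struct list_node and its helpers are hypothetical simplifications, not the real <linux/list.h>.

#include <stdbool.h>
#include <stdio.h>

/* Minimal copy of the kernel's circular doubly-linked list idiom. */
struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *n)
{
	n->prev = n->next = n;
}

static bool list_is_empty(const struct list_node *n)
{
	return n->next == n;	/* a node pointing at itself is "not linked" */
}

static void list_add_front(struct list_node *head, struct list_node *n)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_del_node(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);	/* re-init so a later emptiness check is safe */
}

int main(void)
{
	struct list_node mru, h;

	list_init(&mru);
	list_init(&h);

	list_add_front(&mru, &h);
	if (!list_is_empty(&h))		/* linked: safe to delete */
		list_del_node(&h);
	if (!list_is_empty(&h))		/* second removal is a harmless no-op */
		list_del_node(&h);
	printf("mru empty: %d\n", list_is_empty(&mru));
	return 0;
}

Because the node is re-initialized as part of the delete in this sketch, a repeated removal does nothing, which is exactly the property that lets nvmap_mru_remove() be called without knowing whether the handle is still parked on the MRU list.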
Example #4
0
/* doesn't need to be called inside nvmap_pin_lock, since this will only
 * expand the available VM area */
static int handle_unpin(struct nvmap_client *client,
		struct nvmap_handle *h, int free_vm)
{
	int ret = 0;

	nvmap_mru_lock(client->share);
#if defined(CONFIG_MACH_LGE)
	BUG_ON(!h);
#endif
	if (atomic_read(&h->pin) == 0) {
		trace_handle_unpin_error(client, h, atomic_read(&h->pin));
		nvmap_err(client, "%s unpinning unpinned handle %p\n",
			  current->group_leader->comm, h);
		nvmap_mru_unlock(client->share);
		return 0;
	}

	BUG_ON(!h->alloc);

	if (!atomic_dec_return(&h->pin)) {
		if (h->heap_pgalloc && h->pgalloc.area) {
			/* if a secure handle is clean (i.e., mapped into
			 * IOVMM), it needs to be zapped on unpin. */
			if (h->secure && !h->pgalloc.dirty) {
				tegra_iovmm_zap_vm(h->pgalloc.area);
				h->pgalloc.dirty = true;
			}
			if (free_vm) {
				tegra_iovmm_free_vm(h->pgalloc.area);
				h->pgalloc.area = NULL;
			} else
				nvmap_mru_insert_locked(client->share, h);
			ret = 1;
		}
	}

	trace_handle_unpin(client, h, atomic_read(&h->pin));
	nvmap_mru_unlock(client->share);
	nvmap_handle_put(h);
	return ret;
}
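
Compared with Example #1, this handle_unpin() takes a free_vm switch: on the last unpin the caller either has the IOVM area released immediately (as the rollback in Example #2 does by passing true) or has the handle parked on the MRU list so the mapping can be reused. A tiny hypothetical fragment isolating just that decision (vm_area, vm_free, mru_park and last_unpin are stand-in names, not nvmap symbols):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the nvmap types and helpers, showing only
 * the free_vm decision taken on the last unpin. */
struct vm_area { int id; };
struct handle { struct vm_area *area; };

static void vm_free(struct vm_area *a)
{
	printf("freeing IOVM area %d\n", a->id);
	free(a);
}

static void mru_park(struct handle *h)
{
	printf("parking area %d on the MRU list\n", h->area->id);
}

static void last_unpin(struct handle *h, int free_vm)
{
	if (!h->area)
		return;
	if (free_vm) {
		vm_free(h->area);	/* return the space immediately */
		h->area = NULL;
	} else {
		mru_park(h);		/* keep the mapping for a quick re-pin */
	}
}

int main(void)
{
	struct handle h = { .area = malloc(sizeof(struct vm_area)) };

	if (!h.area)
		return 1;
	h.area->id = 1;
	last_unpin(&h, 1);	/* rollback path: free immediately */
	return 0;
}

Parking on the MRU list trades iovm space for the chance of reusing a still-valid mapping; the rollback path in Example #2 frees at once precisely because it is trying to make room.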
Example #5
0
/* must be called inside nvmap_pin_lock, to ensure that an entire stream
 * of pins will complete without racing with a second stream. The handle
 * should have nvmap_handle_get() (or nvmap_validate_get()) called on it
 * before calling this function. */
static int pin_locked(struct nvmap_client *client, struct nvmap_handle *h)
{
	struct tegra_iovmm_area *area;

	BUG_ON(!h->alloc);

	nvmap_mru_lock(client->share);
	if (atomic_inc_return(&h->pin) == 1) {
		if (h->heap_pgalloc && !h->pgalloc.contig) {
			area = nvmap_handle_iovmm_locked(client, h);
			if (!area) {
				/* no race here, inside the pin mutex */
				atomic_dec(&h->pin);
				nvmap_mru_unlock(client->share);
				return -ENOMEM;
			}
			if (area != h->pgalloc.area)
				h->pgalloc.dirty = true;
			h->pgalloc.area = area;
		}
	}
	nvmap_mru_unlock(client->share);
	return 0;
}
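
pin_locked() is the counterpart of handle_unpin(): atomic_inc_return() == 1 marks the first pin, only that caller pays for (re)establishing the IOVMM mapping, and a failed mapping rolls the count back before returning -ENOMEM. A minimal C11 sketch of that first-pin pattern with hypothetical names (buf, buf_pin, map_buf, map_lock):

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct buf {
	atomic_int pin;
	bool mapped;
};

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical mapping step standing in for nvmap_handle_iovmm_locked();
 * this stub always succeeds, a real allocator could return false when it
 * runs out of IOVM space. */
static bool map_buf(struct buf *b)
{
	b->mapped = true;
	return true;
}

/* Mirrors pin_locked(): only the 0 -> 1 transition maps the buffer, and
 * a failed mapping rolls the pin count back. */
static int buf_pin(struct buf *b)
{
	int ret = 0;

	pthread_mutex_lock(&map_lock);
	if (atomic_fetch_add(&b->pin, 1) == 0 && !b->mapped) {
		if (!map_buf(b)) {
			atomic_fetch_sub(&b->pin, 1);
			ret = -ENOMEM;
		}
	}
	pthread_mutex_unlock(&map_lock);
	return ret;
}

int main(void)
{
	struct buf b = { .pin = 0, .mapped = false };

	printf("pin: %d (mapped=%d)\n", buf_pin(&b), b.mapped);
	return 0;
}

In the sketch the count is rolled back while map_lock is still held, echoing the "no race here, inside the pin mutex" comment in pin_locked().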