Example #1
/* doesn't need to be called inside nvmap_pin_lock, since this will only
 * expand the available VM area */
static int handle_unpin(struct nvmap_client *client,
		struct nvmap_handle *h, int free_vm)
{
	int ret = 0;
	nvmap_mru_lock(client->share);
#if defined(CONFIG_MACH_LGE)
	BUG_ON(!h);
#endif
	if (atomic_read(&h->pin) == 0) {
		trace_handle_unpin_error(client, h, atomic_read(&h->pin));
		nvmap_err(client, "%s unpinning unpinned handle %p\n",
			  current->group_leader->comm, h);
		nvmap_mru_unlock(client->share);
		return 0;
	}

	BUG_ON(!h->alloc);

	if (!atomic_dec_return(&h->pin)) {
		if (h->heap_pgalloc && h->pgalloc.area) {
			/* if a secure handle is clean (i.e., mapped into
			 * IOVMM), it needs to be zapped on unpin. */
			if (h->secure && !h->pgalloc.dirty) {
				tegra_iovmm_zap_vm(h->pgalloc.area);
				h->pgalloc.dirty = true;
			}
			if (free_vm) {
				tegra_iovmm_free_vm(h->pgalloc.area);
				h->pgalloc.area = NULL;
			} else
				nvmap_mru_insert_locked(client->share, h);
			ret = 1;
		}
	}

	trace_handle_unpin(client, h, atomic_read(&h->pin));
	nvmap_mru_unlock(client->share);
	nvmap_handle_put(h);
	return ret;
}
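
To show how handle_unpin() above is typically driven, here is a minimal,
hypothetical batch helper. It relies only on what the example shows:
handle_unpin() drops one pin reference per call and, when the pin count
reaches zero, either frees the IOVMM area or parks it on the MRU list
depending on free_vm. The helper name and signature are illustrative and
not part of the driver.

/* hypothetical sketch: unpin an array of handles previously pinned by the
 * same client. Returns how many handles' IOVMM areas became reclaimable,
 * i.e. how many handle_unpin() calls returned 1. */
static int unpin_handles_sketch(struct nvmap_client *client,
				struct nvmap_handle **handles,
				unsigned int nr, int free_vm)
{
	unsigned int i;
	int reclaimable = 0;

	for (i = 0; i < nr; i++) {
		if (!handles[i])
			continue;
		/* handle_unpin() takes the MRU lock itself and, on a
		 * successful unpin, drops the handle reference via
		 * nvmap_handle_put() */
		reclaimable += handle_unpin(client, handles[i], free_vm);
	}
	return reclaimable;
}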
Example #2
/* returns a tegra_iovmm_area for a handle. if the handle already has
 * an iovmm_area allocated, the handle is simply removed from its MRU list
 * and the existing iovmm_area is returned.
 *
 * if there is no existing allocation, try to allocate a new IOVMM area.
 *
 * if a new area cannot be allocated, try to re-use the most-recently-unpinned
 * handle's allocation.
 *
 * if that also fails, iteratively evict handles from the MRU lists and free
 * their allocations until the new allocation succeeds.
 */
struct tegra_iovmm_area *nvmap_handle_iovmm_locked(struct nvmap_client *c,
					    struct nvmap_handle *h)
{
	struct list_head *mru;
	struct nvmap_handle *evict = NULL;
	struct tegra_iovmm_area *vm = NULL;
	unsigned int i, idx;
	pgprot_t prot;

	BUG_ON(!h || !c || !c->share);

	prot = nvmap_pgprot(h, pgprot_kernel);

	if (h->pgalloc.area) {
		BUG_ON(list_empty(&h->pgalloc.mru_list));
		list_del(&h->pgalloc.mru_list);
		INIT_LIST_HEAD(&h->pgalloc.mru_list);
		return h->pgalloc.area;
	}

	vm = tegra_iovmm_create_vm(c->share->iovmm, NULL, h->size, prot);

	if (vm) {
		INIT_LIST_HEAD(&h->pgalloc.mru_list);
		return vm;
	}
	/* attempt to re-use the most recently unpinned IOVMM area in the
	 * same size bin as the current handle. If that fails, iteratively
	 * evict handles (starting from the current bin) until an allocation
	 * succeeds or no more areas can be evicted */
	mru = mru_list(c->share, h->size);
	if (!list_empty(mru))
		evict = list_first_entry(mru, struct nvmap_handle,
					 pgalloc.mru_list);

	if (evict && evict->pgalloc.area->iovm_length >= h->size) {
		list_del(&evict->pgalloc.mru_list);
		vm = evict->pgalloc.area;
		evict->pgalloc.area = NULL;
		INIT_LIST_HEAD(&evict->pgalloc.mru_list);
		return vm;
	}

	idx = mru - c->share->mru_lists;

	for (i = 0; i < c->share->nr_mru && !vm; i++, idx++) {
		if (idx >= c->share->nr_mru)
			idx = 0;
		mru = &c->share->mru_lists[idx];
		while (!list_empty(mru) && !vm) {
			evict = list_first_entry(mru, struct nvmap_handle,
						 pgalloc.mru_list);

			BUG_ON(atomic_read(&evict->pin) != 0);
			BUG_ON(!evict->pgalloc.area);
			list_del(&evict->pgalloc.mru_list);
			INIT_LIST_HEAD(&evict->pgalloc.mru_list);
			tegra_iovmm_free_vm(evict->pgalloc.area);
			evict->pgalloc.area = NULL;
			vm = tegra_iovmm_create_vm(c->share->iovmm,
						   NULL, h->size, prot);
		}
	}
	return vm;
}
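
The example relies on an mru_list() helper to pick a size bin, but does not
show it. The sketch below is an assumption about how such a helper could
look, based only on the pointer arithmetic "idx = mru - c->share->mru_lists"
and the nr_mru/mru_lists fields used above: it bins allocations by the
power-of-two size in pages and clamps to the last list. The type name
struct nvmap_share, the function name mru_list_sketch, and the exact binning
rule are assumptions, not the driver's actual implementation.

#include <linux/log2.h>		/* ilog2() */
#include <linux/kernel.h>	/* min_t() */

/* hypothetical sketch: map an allocation size to one of the nr_mru MRU
 * lists. Returning a pointer into mru_lists[] keeps the caller's
 * "idx = mru - share->mru_lists" arithmetic valid. */
static struct list_head *mru_list_sketch(struct nvmap_share *share,
					 size_t size)
{
	unsigned int idx = 0;
	size_t pages = size >> PAGE_SHIFT;

	if (pages)
		idx = min_t(unsigned int, ilog2(pages), share->nr_mru - 1);

	return &share->mru_lists[idx];
}

With a binning like this, the eviction loop in the example starts at the bin
matching the requested size and wraps around through all nr_mru lists until
tegra_iovmm_create_vm() succeeds or every list has been emptied.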