Example #1
/* doesn't need to be called inside nvmap_pin_lock, since this will only
 * expand the available VM area */
static int handle_unpin(struct nvmap_client *client, struct nvmap_handle *h)
{
	int ret = 0;

	nvmap_mru_lock(client->share);

	if (atomic_read(&h->pin) == 0) {
		nvmap_err(client, "%s unpinning unpinned handle %p\n",
			  current->group_leader->comm, h);
		nvmap_mru_unlock(client->share);
		return 0;
	}

	BUG_ON(!h->alloc);

	if (!atomic_dec_return(&h->pin)) {
		if (h->heap_pgalloc && h->pgalloc.area) {
			/* if a secure handle is clean (i.e., mapped into
			 * IOVMM), it needs to be zapped on unpin. */
			if (h->secure && !h->pgalloc.dirty) {
				tegra_iovmm_zap_vm(h->pgalloc.area);
				h->pgalloc.dirty = true;
			}
			nvmap_mru_insert_locked(client->share, h);
			ret = 1;
		}
	}

	nvmap_mru_unlock(client->share);

	nvmap_handle_put(h);
	return ret;
}
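
A minimal caller-side sketch (not from the driver source): handle_unpin() ends by calling nvmap_handle_put(), so it consumes one handle reference. A caller that needs the handle to stay valid afterwards must take an extra reference first, exactly as the err_out_unpin path of nvmap_pin_array() in Example #3 does.

/* Illustrative helper (hypothetical, not part of nvmap) */
static void example_unpin_keep_handle(struct nvmap_client *client,
				      struct nvmap_handle *h)
{
	/* balance the nvmap_handle_put() performed inside handle_unpin() */
	nvmap_handle_get(h);
	handle_unpin(client, h);
}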
/* map the backing pages for a heap_pgalloc handle into its IOVMM area */
static int map_iovmm_area(struct nvmap_handle *h)
{
	int err;

	BUG_ON(!h->heap_pgalloc || !h->pgalloc.area);
	BUG_ON(h->size & ~PAGE_MASK);
	WARN_ON(!h->pgalloc.dirty);

	err = tegra_iovmm_vm_insert_pages(h->pgalloc.area,
					  h->pgalloc.area->iovm_start,
					  h->pgalloc.pages,
					  h->size >> PAGE_SHIFT);
	if (err) {
		tegra_iovmm_zap_vm(h->pgalloc.area);
		return err;
	}
	h->pgalloc.dirty = false;
	return 0;
}
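
For orientation, a hedged sketch of the pin-side idiom this function serves, mirroring what nvmap_pin_array() and nvmap_pin_ids() in Example #3 do: only heap_pgalloc handles whose IOVMM mapping is still dirty need their pages inserted, and map_iovmm_area() already zaps the area itself on failure.

/* Illustrative only (hypothetical wrapper, not part of nvmap) */
static int example_map_if_dirty(struct nvmap_handle *h)
{
	if (h->heap_pgalloc && h->pgalloc.dirty)
		return map_iovmm_area(h);	/* zaps the area on error */
	return 0;
}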
Example #3
/* doesn't need to be called inside nvmap_pin_lock, since this will only
 * expand the available VM area */
static int handle_unpin(struct nvmap_client *client,
		struct nvmap_handle *h, int free_vm)
{
	int ret = 0;
	nvmap_mru_lock(client->share);
#if defined(CONFIG_MACH_LGE)
	BUG_ON(!h);
#endif
	if (atomic_read(&h->pin) == 0) {
		trace_handle_unpin_error(client, h, atomic_read(&h->pin));
		nvmap_err(client, "%s unpinning unpinned handle %p\n",
			  current->group_leader->comm, h);
		nvmap_mru_unlock(client->share);
		return 0;
	}

	BUG_ON(!h->alloc);

	if (!atomic_dec_return(&h->pin)) {
		if (h->heap_pgalloc && h->pgalloc.area) {
			/* if a secure handle is clean (i.e., mapped into
			 * IOVMM), it needs to be zapped on unpin. */
			if (h->secure && !h->pgalloc.dirty) {
				tegra_iovmm_zap_vm(h->pgalloc.area);
				h->pgalloc.dirty = true;
			}
			if (free_vm) {
				tegra_iovmm_free_vm(h->pgalloc.area);
				h->pgalloc.area = NULL;
			} else
				nvmap_mru_insert_locked(client->share, h);
			ret = 1;
		}
	}

	trace_handle_unpin(client, h, atomic_read(&h->pin));
	nvmap_mru_unlock(client->share);
	nvmap_handle_put(h);
	return ret;
}
/*
 * nvmap_pin_array - pin an array of handle ids
 *
 * @client:       nvmap_client to use for validation; should be owned by
 *                the process which is submitting command buffers
 * @ids:          array of handle ids to pin
 * @id_type_mask: bitmask which selects the handle type field within a
 *                handle id
 * @id_type:      only handles of this type are pinned; handles of any
 *                other type are ignored
 * @nr:           number of entries in @ids
 * @unique_arr:   list of nvmap_handle objects which were pinned by
 *                nvmap_pin_array; must be unpinned after use
 * @unique_arr_refs: list of duplicated nvmap_handle_refs corresponding
 *                   to @unique_arr; must be freed after use
 */
int nvmap_pin_array(struct nvmap_client *client,
		unsigned long	*ids,
		unsigned long id_type_mask,
		unsigned long id_type,
		int nr,
		struct nvmap_handle **unique_arr,
		struct nvmap_handle_ref **unique_arr_refs)
{
	int count = 0;
	int ret = 0;
	int i;

	if (mutex_lock_interruptible(&client->share->pin_lock)) {
		nvmap_err(client, "%s interrupted when acquiring pin lock\n",
			   current->group_leader->comm);
		return -EINTR;
	}

	count = nvmap_validate_get_pin_array(client, ids,
			id_type_mask, id_type, nr,
			unique_arr, unique_arr_refs);

	if (count < 0) {
		mutex_unlock(&client->share->pin_lock);
		nvmap_warn(client, "failed to validate pin array\n");
		return count;
	}

	for (i = 0; i < count; i++)
		unique_arr[i]->flags &= ~NVMAP_HANDLE_VISITED;

	ret = wait_pin_array_locked(client, unique_arr, count);

	mutex_unlock(&client->share->pin_lock);

	if (WARN_ON(ret)) {
		goto err_out;
	} else {
		for (i = 0; i < count; i++) {
			if (unique_arr[i]->heap_pgalloc &&
			    unique_arr[i]->pgalloc.dirty) {
				ret = map_iovmm_area(unique_arr[i]);
				while (ret && --i >= 0) {
					tegra_iovmm_zap_vm(
						unique_arr[i]->pgalloc.area);
					atomic_dec(&unique_arr_refs[i]->pin);
				}
				if (ret)
					goto err_out_unpin;
			}

			atomic_inc(&unique_arr_refs[i]->pin);
		}
	}
	return count;

err_out_unpin:
	for (i = 0; i < count; i++) {
		/* inc ref counter, because handle_unpin decrements it */
		nvmap_handle_get(unique_arr[i]);
		/* unpin handles and free vm */
		handle_unpin(client, unique_arr[i], true);
	}
err_out:
	for (i = 0; i < count; i++) {
		/* pin ref */
		nvmap_handle_put(unique_arr[i]);
		/* remove duplicate */
		atomic_dec(&unique_arr_refs[i]->dupes);
		nvmap_handle_put(unique_arr[i]);
	}

	return ret;
}
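
A hedged usage sketch, not taken from the driver: per the comment block above, every handle returned in unique_arr must be unpinned after use and every ref in unique_arr_refs must be freed. MAX_PIN, nvmap_unpin_handles() and nvmap_free_handle_ref() are assumptions standing in for whatever bound and unpin/free helpers the surrounding driver provides, and the 0/0 mask and type arguments are placeholders only.

#define MAX_PIN 64	/* hypothetical bound on pins per submission */

static int example_pin_for_submit(struct nvmap_client *client,
				  unsigned long *ids, int nr)
{
	struct nvmap_handle *unique[MAX_PIN];
	struct nvmap_handle_ref *refs[MAX_PIN];
	int count, i;

	if (nr > MAX_PIN)
		return -EINVAL;

	count = nvmap_pin_array(client, ids, 0, 0, nr, unique, refs);
	if (count < 0)
		return count;		/* nothing was pinned */

	/* ... submit command buffers that reference the pinned handles ... */

	/* unpin everything, then release the duplicated refs */
	nvmap_unpin_handles(client, unique, count);	/* hypothetical */
	for (i = 0; i < count; i++)
		nvmap_free_handle_ref(client, refs[i]);	/* hypothetical */
	return 0;
}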
/* pins a list of handle_ref objects; same conditions apply as to
 * _nvmap_handle_pin, but also bumps the pin count of each handle_ref. */
int nvmap_pin_ids(struct nvmap_client *client,
		  unsigned int nr, const unsigned long *ids)
{
	int ret = 0;
	int i;
	struct nvmap_handle **h = (struct nvmap_handle **)ids;
	struct nvmap_handle_ref *ref;

	/* to optimize for the common case (client provided valid handle
	 * references and the pin succeeds), increment the handle_ref pin
	 * count during validation. in error cases, the tree will need to
	 * be re-walked, since the handle_ref is discarded so that an
	 * allocation isn't required. if a handle_ref is not found,
	 * locally validate that the caller has permission to pin the handle;
	 * handle_refs are not created in this case, so it is possible that
	 * if the caller crashes after pinning a global handle, the handle
	 * will be permanently leaked. */
	nvmap_ref_lock(client);
	for (i = 0; i < nr; i++) {
		ref = _nvmap_validate_id_locked(client, ids[i]);
		if (ref) {
			atomic_inc(&ref->pin);
			nvmap_handle_get(h[i]);
		} else {
			struct nvmap_handle *verify;
			nvmap_ref_unlock(client);
			verify = nvmap_validate_get(client, ids[i]);
			if (verify) {
				nvmap_warn(client, "%s pinning unreferenced "
					   "handle %p\n",
					   current->group_leader->comm, h[i]);
			} else {
				/* re-take ref_lock so the unconditional
				 * unlock after the loop stays balanced */
				nvmap_ref_lock(client);
				ret = -EPERM;
				nr = i;
				break;
			}
			nvmap_ref_lock(client);
		}
		if (!h[i]->alloc) {
			ret = -EFAULT;
			nr = i + 1;
			break;
		}
	}
	nvmap_ref_unlock(client);

	if (ret)
		goto out;

	ret = mutex_lock_interruptible(&client->share->pin_lock);
	if (WARN_ON(ret))
		goto out;

	ret = wait_pin_array_locked(client, h, nr);

	mutex_unlock(&client->share->pin_lock);

	if (ret) {
		ret = -EINTR;
	} else {
		for (i = 0; i < nr; i++) {
			if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty) {
				ret = map_iovmm_area(h[i]);
				while (ret && --i >= 0)
					tegra_iovmm_zap_vm(h[i]->pgalloc.area);
			}
		}
	}

out:
	if (ret) {
		nvmap_ref_lock(client);
		for (i = 0; i < nr; i++) {
			if (!ids[i])
				continue;

			ref = _nvmap_validate_id_locked(client, ids[i]);
			if (!ref) {
				nvmap_warn(client, "%s freed handle %p "
					   "during pinning\n",
					   current->group_leader->comm,
					   (void *)ids[i]);
				continue;
			}
			atomic_dec(&ref->pin);
		}
		nvmap_ref_unlock(client);

		for (i = 0; i < nr; i++)
			if (h[i])
				nvmap_handle_put(h[i]);
	}

	return ret;
}
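
A usage sketch under the same caveat: on success nvmap_pin_ids() leaves every handle in ids pinned and referenced, and its error paths roll back internally, so the caller only has to hand the same id list to a matching unpin routine once the work is done. nvmap_unpin_ids() is an assumed counterpart that is not shown in this excerpt.

/* Illustrative only; nvmap_unpin_ids() is hypothetical here */
static int example_pin_ids(struct nvmap_client *client,
			   unsigned int nr, const unsigned long *ids)
{
	int err = nvmap_pin_ids(client, nr, ids);

	if (err)
		return err;		/* pins already rolled back above */

	/* ... use the pinned buffers ... */

	nvmap_unpin_ids(client, nr, ids);
	return 0;
}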