Example 1
void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *ref)
{
	if (!ref)
		return;

	atomic_dec(&ref->pin);
	if (handle_unpin(client, ref->handle, false))
		wake_up(&client->share->pin_wait);
}
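handle_unpin() returns non-zero when the unpin released iovm space that blocked pinners may be waiting on, which is why the caller wakes pin_wait afterwards. As a rough illustration of the wait side of that handshake (the driver's real wait_pin_locked()/wait_pin_array_locked() helpers are not shown in these examples), a minimal sketch could look like the following; try_pin() and iovmm_space_available() are hypothetical stand-ins:

/* Illustrative sketch only: block on pin_wait until an unpin frees
 * space, then retry. Not the driver's actual wait helper. */
static int wait_pin_sketch(struct nvmap_client *client,
			   struct nvmap_handle *h)
{
	int err;

	for (;;) {
		err = try_pin(client, h);		/* hypothetical */
		if (err != -ENOMEM)
			return err;
		err = wait_event_interruptible(client->share->pin_wait,
				iovmm_space_available(client));	/* hypothetical */
		if (err)
			return err;	/* interrupted by a signal */
	}
}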
Example 2
static int pin_array_locked(struct nvmap_client *client,
		struct nvmap_handle **h, int count)
{
	int pinned;
	int i;
	int err = 0;

	/* Flush deferred cache maintenance if needed */
	for (pinned = 0; pinned < count; pinned++)
		if (nvmap_find_cache_maint_op(client->dev, h[pinned]))
			nvmap_cache_maint_ops_flush(client->dev, h[pinned]);

	nvmap_mru_lock(client->share);
	for (pinned = 0; pinned < count; pinned++) {
		err = pin_locked(client, h[pinned]);
		if (err)
			break;
	}
	nvmap_mru_unlock(client->share);

	if (err) {
		/* unpin pinned handles */
		for (i = 0; i < pinned; i++) {
			/* inc ref counter, because
			 * handle_unpin decrements it */
			nvmap_handle_get(h[i]);
			/* unpin handles and free vm */
			handle_unpin(client, h[i], true);
		}
	}

	if (err && tegra_iovmm_get_max_free(client->share->iovmm) >=
							client->iovm_limit) {
		/* The first attempt to pin into an empty iovmm may
		 * still fail because of fragmentation caused by
		 * placing handles in MRU areas. After such a failure
		 * the whole MRU list is cleaned and its iovm space is
		 * freed.
		 *
		 * We have to retry the pinning here, since there may
		 * be no more incoming pin_wait wakeups from unpin
		 * operations. */
		nvmap_mru_lock(client->share);
		for (pinned = 0; pinned < count; pinned++) {
			err = pin_locked(client, h[pinned]);
			if (err)
				break;
		}
		nvmap_mru_unlock(client->share);

		if (err) {
			pr_err("Pinning in empty iovmm failed!!!\n");
			BUG_ON(1);
		}
	}
	return err;
}
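The rollback loop above depends on a reference-counting contract that recurs throughout these examples: handle_unpin() drops a handle reference internally, so a caller that does not hold a spare reference must take one first with nvmap_handle_get(). A minimal sketch of that pattern as a standalone helper (illustrative; the driver inlines it):

/* Illustrative helper: undo the first 'pinned' pins after a partial
 * failure. nvmap_handle_get() balances the reference that
 * handle_unpin() drops; passing true also frees the iovm area. */
static void unpin_pinned_prefix(struct nvmap_client *client,
				struct nvmap_handle **h, int pinned)
{
	int i;

	for (i = 0; i < pinned; i++) {
		nvmap_handle_get(h[i]);
		handle_unpin(client, h[i], true);
	}
}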
Example 3
void nvmap_unpin_handles(struct nvmap_client *client,
			 struct nvmap_handle **h, int nr)
{
	int i;
	int do_wake = 0;

	for (i = 0; i < nr; i++) {
		if (WARN_ON(!h[i]))
			continue;
		do_wake |= handle_unpin(client, h[i], false);
	}

	if (do_wake)
		wake_up(&client->share->pin_wait);
}
Example 4
/*
 * Pin handle without slow validation step
 */
phys_addr_t _nvmap_pin(struct nvmap_client *client,
			struct nvmap_handle_ref *ref)
{
	int ret = 0;
	struct nvmap_handle *h;
	phys_addr_t phys;

	if (!ref)
		return -EINVAL;

	h = ref->handle;

	if (WARN_ON(!h))
		return -EINVAL;

	h = nvmap_handle_get(h);

	atomic_inc(&ref->pin);

	if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) {
		ret = -EINTR;
	} else {
		ret = wait_pin_array_locked(client, &h, 1);
		mutex_unlock(&client->share->pin_lock);
	}

	if (ret)
		goto err_out;

	if (h->heap_pgalloc && h->pgalloc.dirty)
		ret = map_iovmm_area(h);
	if (ret)
		goto err_out_unpin;

	phys = handle_phys(h);

	return phys;

err_out_unpin:
	nvmap_handle_get(h);
	handle_unpin(client, h, true);
err_out:
	atomic_dec(&ref->pin);
	nvmap_handle_put(h);
	return ret;
}
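Since _nvmap_pin() encodes errors (-EINVAL, -EINTR) in its phys_addr_t return value, callers have to test the result before using it as an address. A hedged usage sketch pairing Example 4 with nvmap_unpin() from Example 1; do_device_dma() is a hypothetical consumer:

/* Illustrative caller: pin a buffer for device access, then unpin. */
static int use_pinned_buffer(struct nvmap_client *client,
			     struct nvmap_handle_ref *ref)
{
	phys_addr_t phys = _nvmap_pin(client, ref);

	if (IS_ERR_VALUE(phys))
		return (int)phys;	/* -EINVAL or -EINTR */

	do_device_dma(phys);		/* hypothetical */
	nvmap_unpin(client, ref);	/* may wake blocked pinners */
	return 0;
}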
Example 5
static int handle_unpin_noref(struct nvmap_client *client, unsigned long id)
{
	struct nvmap_handle *h;
	int w;

	h = nvmap_validate_get(client, id);
	if (unlikely(!h)) {
		nvmap_err(client, "%s attempting to unpin invalid handle %p\n",
			  current->group_leader->comm, (void *)id);
		return 0;
	}

	nvmap_err(client, "%s unpinning unreferenced handle %p\n",
		  current->group_leader->comm, h);
	WARN_ON(1);

	w = handle_unpin(client, h, false);
	nvmap_handle_put(h);
	return w;
}
Example 6
void nvmap_unpin_ids(struct nvmap_client *client,
		     unsigned int nr, const unsigned long *ids)
{
	unsigned int i;
	int do_wake = 0;

	for (i = 0; i < nr; i++) {
		struct nvmap_handle_ref *ref;

		if (!ids[i])
			continue;

		nvmap_ref_lock(client);
		ref = _nvmap_validate_id_locked(client, ids[i]);
		if (ref) {
			struct nvmap_handle *h = ref->handle;
			int e = atomic_add_unless(&ref->pin, -1, 0);

			nvmap_ref_unlock(client);

			if (!e) {
				nvmap_err(client, "%s unpinning unpinned "
					  "handle %08lx\n",
					  current->group_leader->comm, ids[i]);
			} else {
				do_wake |= handle_unpin(client, h, false);
			}
		} else {
			nvmap_ref_unlock(client);
			if (client->super)
				do_wake |= handle_unpin_noref(client, ids[i]);
			else
				nvmap_err(client, "%s unpinning invalid "
					  "handle %08lx\n",
					  current->group_leader->comm, ids[i]);
		}
	}

	if (do_wake)
		wake_up(&client->share->pin_wait);
}
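The guard on the pin count above relies on the semantics of atomic_add_unless() from <linux/atomic.h>: atomic_add_unless(v, a, u) adds a to *v unless *v == u, and returns non-zero only if the addition happened. Decrementing by -1 with a limit of 0 therefore detects an unbalanced unpin instead of letting the count go negative:

/* Sketch of the guard in isolation. */
atomic_t pin = ATOMIC_INIT(0);

if (!atomic_add_unless(&pin, -1, 0))
	pr_warn("unbalanced unpin detected\n");	/* count stays at 0 */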
Example 7
/*
 * @client:       nvmap_client which should be used for validation;
 *                should be owned by the process which is submitting
 *                command buffers
 * @ids:          array of nvmap handle ids to pin
 * @id_type_mask: bitmask which defines the handle type field in a handle id
 * @id_type:      only handles of this type will be pinned; handles of
 *                other types are ignored
 * @nr:           number of entries in ids
 * @unique_arr:   list of nvmap_handle objects which were pinned by
 *                nvmap_pin_array; must be unpinned after use
 * @unique_arr_refs: list of duplicated nvmap_handle_refs corresponding
 *                   to unique_arr; must be freed after use
 */
int nvmap_pin_array(struct nvmap_client *client,
		unsigned long	*ids,
		unsigned long id_type_mask,
		unsigned long id_type,
		int nr,
		struct nvmap_handle **unique_arr,
		struct nvmap_handle_ref **unique_arr_refs)
{
	int count = 0;
	int ret = 0;
	int i;

	if (mutex_lock_interruptible(&client->share->pin_lock)) {
		nvmap_err(client, "%s interrupted when acquiring pin lock\n",
			   current->group_leader->comm);
		return -EINTR;
	}

	count = nvmap_validate_get_pin_array(client, ids,
			id_type_mask, id_type, nr,
			unique_arr, unique_arr_refs);

	if (count < 0) {
		mutex_unlock(&client->share->pin_lock);
		nvmap_warn(client, "failed to validate pin array\n");
		return count;
	}

	for (i = 0; i < count; i++)
		unique_arr[i]->flags &= ~NVMAP_HANDLE_VISITED;

	ret = wait_pin_array_locked(client, unique_arr, count);

	mutex_unlock(&client->share->pin_lock);

	if (WARN_ON(ret))
		goto err_out;

	for (i = 0; i < count; i++) {
		if (unique_arr[i]->heap_pgalloc &&
		    unique_arr[i]->pgalloc.dirty) {
			ret = map_iovmm_area(unique_arr[i]);
			while (ret && --i >= 0) {
				tegra_iovmm_zap_vm(
					unique_arr[i]->pgalloc.area);
				atomic_dec(&unique_arr_refs[i]->pin);
			}
			if (ret)
				goto err_out_unpin;
		}

		atomic_inc(&unique_arr_refs[i]->pin);
	}
	return count;
	return count;

err_out_unpin:
	for (i = 0; i < count; i++) {
		/* inc ref counter, because handle_unpin decrements it */
		nvmap_handle_get(unique_arr[i]);
		/* unpin handles and free vm */
		handle_unpin(client, unique_arr[i], true);
	}
err_out:
	for (i = 0; i < count; i++) {
		/* pin ref */
		nvmap_handle_put(unique_arr[i]);
		/* remove duplicate */
		atomic_dec(&unique_arr_refs[i]->dupes);
		nvmap_handle_put(unique_arr[i]);
	}

	return ret;
}
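Per the documentation block above, a successful nvmap_pin_array() hands back pinned handles in unique_arr that must later be unpinned, which is exactly what nvmap_unpin_handles() from Example 3 does. A hedged submit-path sketch; MAX_HANDLES, ID_TYPE_MASK, ID_TYPE_BUFFER and submit_to_hw() are hypothetical:

/* Illustrative submit path: pin every handle a command buffer
 * references, let the hardware consume them, then unpin. */
static int submit_sketch(struct nvmap_client *client,
			 unsigned long *ids, int nr)
{
	struct nvmap_handle *unique[MAX_HANDLES];
	struct nvmap_handle_ref *unique_refs[MAX_HANDLES];
	int count;

	count = nvmap_pin_array(client, ids, ID_TYPE_MASK,
				ID_TYPE_BUFFER, nr, unique, unique_refs);
	if (count < 0)
		return count;

	submit_to_hw(unique, count);		/* hypothetical */

	nvmap_unpin_handles(client, unique, count);
	/* the duplicated refs in unique_refs must also be freed,
	 * per the function's documentation above */
	return 0;
}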
Example 8
/* pins a list of handle_ref objects; same conditions apply as to
 * _nvmap_handle_pin, but also bumps the pin count of each handle_ref. */
int nvmap_pin_ids(struct nvmap_client *client,
		  unsigned int nr, const unsigned long *ids)
{
	int ret = 0;
	int cnt = 0;
	unsigned int i;
	struct nvmap_handle **h = (struct nvmap_handle **)ids;
	struct nvmap_handle_ref *ref;

	/* To optimize for the common case (the client provided valid
	 * handle references and the pin succeeds), increment the
	 * handle_ref pin count during validation. In error cases the
	 * tree will need to be re-walked, since the handle_ref is
	 * discarded so that an allocation isn't required. If a
	 * handle_ref is not found, only validate that the caller has
	 * permission to pin the handle; no handle_ref is created in
	 * this case, so if the caller crashes after pinning a global
	 * handle, the handle may be permanently leaked. */
	nvmap_ref_lock(client);
	for (i = 0; i < nr && !ret; i++) {
		ref = _nvmap_validate_id_locked(client, ids[i]);
		if (ref) {
			atomic_inc(&ref->pin);
			nvmap_handle_get(h[i]);
		} else {
			struct nvmap_handle *verify;
			nvmap_ref_unlock(client);
			verify = nvmap_validate_get(client, ids[i]);
			if (verify)
				nvmap_warn(client, "%s pinning unreferenced "
					   "handle %p\n",
					   current->group_leader->comm, h[i]);
			else
				ret = -EPERM;
			nvmap_ref_lock(client);
		}
	}
	nvmap_ref_unlock(client);

	nr = i;

	if (ret)
		goto out;

	ret = mutex_lock_interruptible(&client->share->pin_lock);
	if (WARN_ON(ret))
		goto out;

	for (cnt = 0; cnt < nr && !ret; cnt++) {
		ret = wait_pin_locked(client, h[cnt]);
	}
	mutex_unlock(&client->share->pin_lock);

	if (ret) {
		int do_wake = 0;

		for (i = 0; i < cnt; i++)
			do_wake |= handle_unpin(client, h[i], false);

		if (do_wake)
			wake_up(&client->share->pin_wait);

		ret = -EINTR;
	} else {
		for (i = 0; i < nr; i++) {
			if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
				map_iovmm_area(h[i]);
		}
	}

out:
	if (ret) {
		nvmap_ref_lock(client);
		for (i = 0; i < nr; i++) {
			ref = _nvmap_validate_id_locked(client, ids[i]);
			if (!ref) {
				nvmap_warn(client, "%s freed handle %p "
					   "during pinning\n",
					   current->group_leader->comm,
					   (void *)ids[i]);
				continue;
			}
			atomic_dec(&ref->pin);
		}
		nvmap_ref_unlock(client);

		for (i = cnt; i < nr; i++)
			nvmap_handle_put(h[i]);
	}

	return ret;
}
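nvmap_pin_ids() is symmetric with nvmap_unpin_ids() from Example 6, and on failure it leaves nothing pinned (the error paths above roll every pin back). A minimal pairing sketch; use_buffers() is hypothetical:

/* Illustrative id-based pin/unpin round trip. */
static int pin_round_trip(struct nvmap_client *client,
			  unsigned int nr, const unsigned long *ids)
{
	int err = nvmap_pin_ids(client, nr, ids);

	if (err)
		return err;	/* nothing left pinned on failure */

	use_buffers(ids, nr);	/* hypothetical */
	nvmap_unpin_ids(client, nr, ids);
	return 0;
}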