Example #1
phys_addr_t nvmap_pin(struct nvmap_client *client,
			struct nvmap_handle_ref *ref)
{
	struct nvmap_handle *h;
	phys_addr_t phys;
	int ret = 0;

	h = nvmap_handle_get(ref->handle);
	if (WARN_ON(!h))
		return -EINVAL;

	atomic_inc(&ref->pin);

	if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) {
		ret = -EINTR;
	} else {
		ret = wait_pin_array_locked(client, &h, 1);
		mutex_unlock(&client->share->pin_lock);
	}

	if (ret) {
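		/* pinning failed: roll back the pin count and the handle
		 * reference taken above */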
		atomic_dec(&ref->pin);
		nvmap_handle_put(h);
	} else {
		if (h->heap_pgalloc && h->pgalloc.dirty)
			map_iovmm_area(h);
		phys = handle_phys(h);
	}

	return ret ?: phys;
}
Example #2
void *nvmap_kmap(struct nvmap_handle_ref *ref, unsigned int pagenum)
{
	struct nvmap_handle *h;
	phys_addr_t paddr;
	unsigned long kaddr;
	pgprot_t prot;
	pte_t **pte;

	BUG_ON(!ref);
	h = nvmap_handle_get(ref->handle);
	if (!h)
		return NULL;

	BUG_ON(pagenum >= h->size >> PAGE_SHIFT);
	prot = nvmap_pgprot(h, pgprot_kernel);
	pte = nvmap_alloc_pte(nvmap_dev, (void **)&kaddr);
	if (!pte)
		goto out;

	if (h->heap_pgalloc)
		paddr = page_to_phys(h->pgalloc.pages[pagenum]);
	else
		paddr = h->carveout->base + pagenum * PAGE_SIZE;

	set_pte_at(&init_mm, kaddr, *pte,
				pfn_pte(__phys_to_pfn(paddr), prot));
	flush_tlb_kernel_page(kaddr);
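	/* the handle reference taken above stays held while this
	 * kernel mapping exists */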
	return (void *)kaddr;
out:
	nvmap_handle_put(ref->handle);
	return NULL;
}
Example #3
/*
 * Pin handle without slow validation step
 */
phys_addr_t _nvmap_pin(struct nvmap_client *client,
			struct nvmap_handle_ref *ref)
{
	int ret = 0;
	struct nvmap_handle *h;
	phys_addr_t phys;

	if (!ref)
		return -EINVAL;

	h = ref->handle;

	if (WARN_ON(!h))
		return -EINVAL;

	h = nvmap_handle_get(h);

	atomic_inc(&ref->pin);

	if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) {
		ret = -EINTR;
	} else {
		ret = wait_pin_array_locked(client, &h, 1);
		mutex_unlock(&client->share->pin_lock);
	}

	if (ret) {
		goto err_out;
	} else {
		if (h->heap_pgalloc && h->pgalloc.dirty)
			ret = map_iovmm_area(h);
		if (ret)
			goto err_out_unpin;
		phys = handle_phys(h);
	}

	return phys;

err_out_unpin:
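	/* inc ref counter, because handle_unpin decrements it */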
	nvmap_handle_get(h);
	handle_unpin(client, h, true);
err_out:
	atomic_dec(&ref->pin);
	nvmap_handle_put(h);
	return ret;
}
Example #4
static int pin_array_locked(struct nvmap_client *client,
		struct nvmap_handle **h, int count)
{
	int pinned;
	int i;
	int err = 0;

	/* Flush deferred cache maintenance if needed */
	for (pinned = 0; pinned < count; pinned++)
		if (nvmap_find_cache_maint_op(client->dev, h[pinned]))
			nvmap_cache_maint_ops_flush(client->dev, h[pinned]);

	nvmap_mru_lock(client->share);
	for (pinned = 0; pinned < count; pinned++) {
		err = pin_locked(client, h[pinned]);
		if (err)
			break;
	}
	nvmap_mru_unlock(client->share);

	if (err) {
		/* unpin pinned handles */
		for (i = 0; i < pinned; i++) {
			/* inc ref counter, because
			 * handle_unpin decrements it */
			nvmap_handle_get(h[i]);
			/* unpin handles and free vm */
			handle_unpin(client, h[i], true);
		}
	}

	if (err && tegra_iovmm_get_max_free(client->share->iovmm) >=
							client->iovm_limit) {
		/* First attempt to pin in empty iovmm
		 * may still fail because of fragmentation caused by
		 * placing handles in MRU areas. After such failure
		 * all MRU gets cleaned and iovm space is freed.
		 *
	 * We have to do pinning again here since there might be
		 * no more incoming pin_wait wakeup calls from unpin
		 * operations */
		nvmap_mru_lock(client->share);
		for (pinned = 0; pinned < count; pinned++) {
			err = pin_locked(client, h[pinned]);
			if (err)
				break;
		}
		nvmap_mru_unlock(client->share);

		if (err) {
			pr_err("Pinning in empty iovmm failed!!!\n");
			BUG_ON(1);
		}
	}
	return err;
}
Example #5
void *nvmap_mmap(struct nvmap_handle_ref *ref)
{
	struct nvmap_handle *h;
	pgprot_t prot;
	unsigned long adj_size;
	unsigned long offs;
	struct vm_struct *v;
	void *p;

	h = nvmap_handle_get(ref->handle);
	if (!h)
		return NULL;

	prot = nvmap_pgprot(h, pgprot_kernel);

	if (h->heap_pgalloc)
		return vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT,
				  -1, prot);

	/* carveout - explicitly map the pfns into a vmalloc area */

	nvmap_usecount_inc(h);

	adj_size = h->carveout->base & ~PAGE_MASK;
	adj_size += h->size;
	adj_size = PAGE_ALIGN(adj_size);

	v = alloc_vm_area(adj_size);
	if (!v) {
		nvmap_usecount_dec(h);
		nvmap_handle_put(h);
		return NULL;
	}

	p = v->addr + (h->carveout->base & ~PAGE_MASK);

	for (offs = 0; offs < adj_size; offs += PAGE_SIZE) {
		unsigned long addr = (unsigned long) v->addr + offs;
		unsigned int pfn;
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pfn = __phys_to_pfn(h->carveout->base + offs);
		pgd = pgd_offset_k(addr);
		pud = pud_alloc(&init_mm, pgd, addr);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, addr);
		if (!pmd)
			break;
		pte = pte_alloc_kernel(pmd, addr);
		if (!pte)
			break;
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		flush_tlb_kernel_page(addr);
	}

	if (offs != adj_size) {
		free_vm_area(v);
		nvmap_usecount_dec(h);
		nvmap_handle_put(h);
		return NULL;
	}

	/* leave the handle ref count incremented by 1, so that
	 * the handle will not be freed while the kernel mapping exists.
	 * nvmap_handle_put will be called when this address is unmapped */
	return p;
}
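
The closing comment states the lifetime rules: the returned mapping keeps one handle reference, and the carveout path additionally holds a usecount. Below is a minimal sketch of what the matching unmap has to do; the function name is hypothetical, and the body simply inverts the two branches of nvmap_mmap above.

/* Hypothetical inverse of nvmap_mmap above: not the driver's actual unmap
 * routine, only the teardown implied by the comment */
void nvmap_munmap_sketch(struct nvmap_handle_ref *ref, void *addr)
{
	struct nvmap_handle *h = ref->handle;

	if (h->heap_pgalloc) {
		/* pages were mapped with vm_map_ram */
		vm_unmap_ram(addr, h->size >> PAGE_SHIFT);
	} else {
		/* carveout: step back to the page-aligned area start,
		 * release the vm area and drop the usecount taken above */
		struct vm_struct *vm;

		addr -= h->carveout->base & ~PAGE_MASK;
		vm = remove_vm_area(addr);
		if (!WARN_ON(!vm))
			kfree(vm);
		nvmap_usecount_dec(h);
	}

	/* drop the reference that nvmap_mmap left held */
	nvmap_handle_put(h);
}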
Example #6
static int nvmap_validate_get_pin_array(struct nvmap_client *client,
				unsigned long *h,
				unsigned long id_type_mask,
				unsigned long id_type,
				int nr,
				struct nvmap_handle **unique_handles,
				struct nvmap_handle_ref **unique_handle_refs)
{
	int i;
	int err = 0;
	int count = 0;
	unsigned long last_h = 0;
	struct nvmap_handle_ref *last_ref = NULL;

	nvmap_ref_lock(client);

	for (i = 0; i < nr; i++) {
		struct nvmap_handle_ref *ref;

		if ((h[i] & id_type_mask) != id_type)
			continue;

		if (last_h == h[i])
			continue;

		ref = _nvmap_validate_id_locked(client, h[i]);

		if (!ref)
			nvmap_err(client, "failed to validate id\n");
		else if (!ref->handle)
			nvmap_err(client, "id had no associated handle\n");
		else if (!ref->handle->alloc)
			nvmap_err(client, "handle had no allocation\n");

		if (!ref || !ref->handle || !ref->handle->alloc) {
			err = -EPERM;
			break;
		}

		last_h = h[i];
		last_ref = ref;
		/* a handle may be referenced multiple times in arr, but
		 * it will only be pinned once; this ensures that the
		 * minimum number of sync-queue slots in the host driver
		 * are dedicated to storing unpin lists, which allows
		 * for greater parallelism between the CPU and graphics
		 * processor */
		if (ref->handle->flags & NVMAP_HANDLE_VISITED)
			continue;

		ref->handle->flags |= NVMAP_HANDLE_VISITED;

		unique_handles[count] = nvmap_handle_get(ref->handle);

		/* Duplicate handle */
		atomic_inc(&ref->dupes);
		nvmap_handle_get(ref->handle);
		unique_handle_refs[count] = ref;

		BUG_ON(!unique_handles[count]);
		count++;
	}

	nvmap_ref_unlock(client);

	if (err) {
		for (i = 0; i < count; i++) {
			unique_handles[i]->flags &= ~NVMAP_HANDLE_VISITED;
			/* pin ref */
			nvmap_handle_put(unique_handles[i]);
			/* ref count */
			atomic_dec(&unique_handle_refs[i]->dupes);
			nvmap_handle_put(unique_handles[i]);
		}
	}

	return err ? err : count;
}
Example #7
/* pins a list of handle_ref objects; same conditions apply as to
 * _nvmap_handle_pin, but also bumps the pin count of each handle_ref. */
int nvmap_pin_ids(struct nvmap_client *client,
		  unsigned int nr, const unsigned long *ids)
{
	int ret = 0;
	unsigned int i;
	struct nvmap_handle **h = (struct nvmap_handle **)ids;
	struct nvmap_handle_ref *ref;

	/* to optimize for the common case (client provided valid handle
	 * references and the pin succeeds), increment the handle_ref pin
	 * count during validation. in error cases, the tree will need to
	 * be re-walked, since the handle_ref is discarded so that an
	 * allocation isn't required. if a handle_ref is not found,
	 * locally validate that the caller has permission to pin the handle;
	 * handle_refs are not created in this case, so it is possible that
	 * if the caller crashes after pinning a global handle, the handle
	 * will be permanently leaked. */
	nvmap_ref_lock(client);
	for (i = 0; i < nr; i++) {
		ref = _nvmap_validate_id_locked(client, ids[i]);
		if (ref) {
			atomic_inc(&ref->pin);
			nvmap_handle_get(h[i]);
		} else {
			struct nvmap_handle *verify;
			nvmap_ref_unlock(client);
			verify = nvmap_validate_get(client, ids[i]);
			if (verify) {
				nvmap_warn(client, "%s pinning unreferenced "
					   "handle %p\n",
					   current->group_leader->comm, h[i]);
			} else {
				ret = -EPERM;
				nr = i;
				break;
			}
			nvmap_ref_lock(client);
		}
		if (!h[i]->alloc) {
			ret = -EFAULT;
			nr = i + 1;
			break;
		}
	}
	nvmap_ref_unlock(client);

	if (ret)
		goto out;

	ret = mutex_lock_interruptible(&client->share->pin_lock);
	if (WARN_ON(ret))
		goto out;

	ret = wait_pin_array_locked(client, h, nr);

	mutex_unlock(&client->share->pin_lock);

	if (ret) {
		ret = -EINTR;
	} else {
		for (i = 0; i < nr; i++) {
			if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
				map_iovmm_area(h[i]);
		}
	}

out:
	if (ret) {
		nvmap_ref_lock(client);
		for (i = 0; i < nr; i++) {
			if (!ids[i])
				continue;

			ref = _nvmap_validate_id_locked(client, ids[i]);
			if (!ref) {
				nvmap_warn(client, "%s freed handle %p "
					   "during pinning\n",
					   current->group_leader->comm,
					   (void *)ids[i]);
				continue;
			}
			atomic_dec(&ref->pin);
		}
		nvmap_ref_unlock(client);

		for (i = 0; i < nr; i++)
			if (h[i])
				nvmap_handle_put(h[i]);
	}

	return ret;
}
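
As a usage note for the contract described in the comment above (nothing stays pinned if the call fails), here is a hedged caller sketch; nvmap_unpin_ids is assumed to be the symmetric driver call and does not appear in these listings.

/* Hypothetical caller: pin a batch of ids, use the buffers, then unpin.
 * nvmap_unpin_ids is an assumed counterpart, not shown above. */
int submit_with_pinned_ids(struct nvmap_client *client,
			   const unsigned long *ids, unsigned int nr)
{
	int err;

	err = nvmap_pin_ids(client, nr, ids);
	if (err)
		return err;	/* on failure every pin/ref taken above was rolled back */

	/* ... program the hardware with the now-resident buffers ... */

	nvmap_unpin_ids(client, nr, ids);	/* assumed counterpart */
	return 0;
}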
Example #8
/*
 * @client:       nvmap_client which should be used for validation;
 *                should be owned by the process which is submitting
 *                command buffers
 * @ids:          array of nvmap_handles to pin
 * @id_type_mask: bitmask which defines the handle type field in a handle id.
 * @id_type:      only handles of this type will be pinned; handles of any
 *                other type are ignored.
 * @nr:           number of entries in ids
 * @unique_arr:   list of nvmap_handle objects which were pinned by
 *                nvmap_pin_array. Must be unpinned after use.
 * @unique_arr_refs: list of duplicated nvmap_handle_refs corresponding
 *                   to unique_arr. Must be freed after use.
 */
int nvmap_pin_array(struct nvmap_client *client,
		unsigned long	*ids,
		unsigned long id_type_mask,
		unsigned long id_type,
		int nr,
		struct nvmap_handle **unique_arr,
		struct nvmap_handle_ref **unique_arr_refs)
{
	int count = 0;
	int ret = 0;
	int i;

	if (mutex_lock_interruptible(&client->share->pin_lock)) {
		nvmap_err(client, "%s interrupted when acquiring pin lock\n",
			   current->group_leader->comm);
		return -EINTR;
	}

	count = nvmap_validate_get_pin_array(client, ids,
			id_type_mask, id_type, nr,
			unique_arr, unique_arr_refs);

	if (count < 0) {
		mutex_unlock(&client->share->pin_lock);
		nvmap_warn(client, "failed to validate pin array\n");
		return count;
	}

	for (i = 0; i < count; i++)
		unique_arr[i]->flags &= ~NVMAP_HANDLE_VISITED;

	ret = wait_pin_array_locked(client, unique_arr, count);

	mutex_unlock(&client->share->pin_lock);

	if (WARN_ON(ret)) {
		goto err_out;
	} else {
		for (i = 0; i < count; i++) {
			if (unique_arr[i]->heap_pgalloc &&
			    unique_arr[i]->pgalloc.dirty) {
				ret = map_iovmm_area(unique_arr[i]);
				while (ret && --i >= 0) {
					tegra_iovmm_zap_vm(
						unique_arr[i]->pgalloc.area);
					atomic_dec(&unique_arr_refs[i]->pin);
				}
				if (ret)
					goto err_out_unpin;
			}

			atomic_inc(&unique_arr_refs[i]->pin);
		}
	}
	return count;

err_out_unpin:
	for (i = 0; i < count; i++) {
		/* inc ref counter, because handle_unpin decrements it */
		nvmap_handle_get(unique_arr[i]);
		/* unpin handles and free vm */
		handle_unpin(client, unique_arr[i], true);
	}
err_out:
	for (i = 0; i < count; i++) {
		/* pin ref */
		nvmap_handle_put(unique_arr[i]);
		/* remove duplicate */
		atomic_dec(&unique_arr_refs[i]->dupes);
		nvmap_handle_put(unique_arr[i]);
	}

	return ret;
}
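
To make the ownership rules from the header comment concrete, a hedged caller sketch follows; the temporary arrays are an illustration, and nvmap_unpin_handles plus the release of the duplicated refs are assumed counterparts that do not appear in these listings.

/* Hypothetical submission path for nvmap_pin_array. The unpin/release step
 * at the end is an assumption: the listings above only show the pin side. */
int pin_job_buffers(struct nvmap_client *client, unsigned long *ids, int nr,
		    unsigned long id_type_mask, unsigned long id_type)
{
	struct nvmap_handle **unique;
	struct nvmap_handle_ref **unique_refs;
	int count;

	unique = kcalloc(nr, sizeof(*unique), GFP_KERNEL);
	unique_refs = kcalloc(nr, sizeof(*unique_refs), GFP_KERNEL);
	if (!unique || !unique_refs) {
		count = -ENOMEM;
		goto out;
	}

	count = nvmap_pin_array(client, ids, id_type_mask, id_type,
				nr, unique, unique_refs);
	if (count < 0)
		goto out;	/* nothing was pinned, nothing to release */

	/* ... submit the job that uses the 'count' pinned handles ... */

	/* once the job completes, every entry of 'unique' must be unpinned
	 * and the duplicated refs in 'unique_refs' released;
	 * nvmap_unpin_handles is an assumed name for that counterpart */
	nvmap_unpin_handles(client, unique, count);

out:
	kfree(unique_refs);
	kfree(unique);
	return count < 0 ? count : 0;
}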