struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
					struct nvmap_handle *h, bool skip_val)
{
	struct nvmap_handle_ref *ref = NULL;

	BUG_ON(!client);
	/* on success the handle's reference count stays incremented, so the
	 * success paths below do not call nvmap_handle_put() */
	h = nvmap_validate_get(h);

	if (!h) {
		pr_debug("%s duplicate handle failed\n",
			    current->group_leader->comm);
		return ERR_PTR(-EPERM);
	}

	if (!h->alloc) {
		pr_err("%s duplicating unallocated handle\n",
			current->group_leader->comm);
		nvmap_handle_put(h);
		return ERR_PTR(-EINVAL);
	}

	nvmap_ref_lock(client);
	ref = __nvmap_validate_locked(client, h);

	if (ref) {
		/* handle already duplicated in client; just increment
		 * the reference count rather than re-duplicating it */
		atomic_inc(&ref->dupes);
		nvmap_ref_unlock(client);
		return ref;
	}

	nvmap_ref_unlock(client);

	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref) {
		nvmap_handle_put(h);
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&ref->dupes, 1);
	ref->handle = h;
	atomic_set(&ref->pin, 0);
	add_handle_ref(client, ref);

	/*
	 * Ref counting on the dma_bufs follows the creation and destruction of
	 * nvmap_handle_refs. That is, every time a handle_ref is created the
	 * dma_buf refcount goes up, and every time a handle_ref is destroyed
	 * the dma_buf refcount goes down.
	 */
	get_dma_buf(h->dmabuf);

	trace_nvmap_duplicate_handle(client, h, ref);
	return ref;
}
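For illustration, a hedged sketch of the release side of the convention described in the comment above: each duplicate holds one handle reference, and only the last dupe drops the dma_buf reference and frees the ref. The helper name is hypothetical, and removal of the ref from the client's tree is omitted.

/* Hypothetical sketch (not the driver's actual free path): undo one
 * nvmap_duplicate_handle() call. */
static void nvmap_release_dupe_sketch(struct nvmap_handle_ref *ref)
{
	struct nvmap_handle *h = ref->handle;

	if (atomic_dec_return(&ref->dupes) == 0) {
		dma_buf_put(h->dmabuf);	/* balances get_dma_buf() above */
		kfree(ref);		/* client-tree removal omitted here */
	}
	nvmap_handle_put(h);		/* balances nvmap_validate_get() */
}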
Example #2
/*
 * @client:       nvmap_client which should be used for validation;
 *                should be owned by the process which is submitting
 *                command buffers
 * @ids:          array of nvmap handle ids to pin
 * @id_type_mask: bitmask which selects the handle type field in a handle id.
 * @id_type:      only handles of this type will be pinned; handles of any
 *                other type are ignored.
 * @nr:           number of entries in ids
 * @unique_arr:   list of nvmap_handle objects which were pinned by
 *                nvmap_pin_array. Must be unpinned after use.
 * @unique_arr_refs: list of duplicated nvmap_handle_refs corresponding
 *                   to unique_arr. Must be freed after use.
 */
int nvmap_pin_array(struct nvmap_client *client,
		unsigned long	*ids,
		unsigned long id_type_mask,
		unsigned long id_type,
		int nr,
		struct nvmap_handle **unique_arr,
		struct nvmap_handle_ref **unique_arr_refs)
{
	int count = 0;
	int ret = 0;
	int i;

	if (mutex_lock_interruptible(&client->share->pin_lock)) {
		nvmap_err(client, "%s interrupted when acquiring pin lock\n",
			   current->group_leader->comm);
		return -EINTR;
	}

	count = nvmap_validate_get_pin_array(client, ids,
			id_type_mask, id_type, nr,
			unique_arr, unique_arr_refs);

	if (count < 0) {
		mutex_unlock(&client->share->pin_lock);
		nvmap_warn(client, "failed to validate pin array\n");
		return count;
	}

	for (i = 0; i < count; i++)
		unique_arr[i]->flags &= ~NVMAP_HANDLE_VISITED;

	ret = wait_pin_array_locked(client, unique_arr, count);

	mutex_unlock(&client->share->pin_lock);

	if (WARN_ON(ret)) {
		for (i = 0; i < count; i++) {
			/* drop the reference taken for pinning */
			nvmap_handle_put(unique_arr[i]);
			/* undo the duplicate created during validation */
			atomic_dec(&unique_arr_refs[i]->dupes);
			nvmap_handle_put(unique_arr[i]);
		}
		return ret;
	} else {
		for (i = 0; i < count; i++) {
			if (unique_arr[i]->heap_pgalloc &&
			    unique_arr[i]->pgalloc.dirty)
				map_iovmm_area(unique_arr[i]);

			atomic_inc(&unique_arr_refs[i]->pin);
		}
	}
	return count;
}
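A hypothetical caller sketch for the API documented above: pin everything a command buffer references, queue the work, then unpin and release the duplicates (here via nvmap_free(), as the overlay code further down does). nvmap_unpin_handles() is an assumed counterpart not shown in this listing; the zero type mask/type and the array size are illustrative.

/* Hypothetical submit-path sketch, under the assumptions stated above. */
static int submit_pin_sketch(struct nvmap_client *client,
			     unsigned long *ids, int nr)
{
	struct nvmap_handle *handles[16];
	struct nvmap_handle_ref *refs[16];
	int count, i;

	if (nr < 0 || nr > (int)ARRAY_SIZE(handles))
		return -EINVAL;

	count = nvmap_pin_array(client, ids, 0 /* mask */, 0 /* type */,
				nr, handles, refs);
	if (count < 0)
		return count;

	/* ... queue work that references the pinned buffers ... */

	nvmap_unpin_handles(client, handles, count);	/* assumed counterpart */
	for (i = 0; i < count; i++)
		nvmap_free(client, refs[i]);		/* drop the duplicates */
	return 0;
}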
Example #3
void nvmap_kunmap(struct nvmap_handle_ref *ref, unsigned int pagenum,
		  void *addr)
{
	struct nvmap_handle *h;
	phys_addr_t paddr;
	pte_t **pte;

	BUG_ON(!addr || !ref);
	h = ref->handle;

	if (nvmap_find_cache_maint_op(h->dev, h)) {
		struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
		/* acquire pin lock to ensure maintenance is done before
		 * handle is pinned */
		mutex_lock(&share->pin_lock);
		nvmap_cache_maint_ops_flush(h->dev, h);
		mutex_unlock(&share->pin_lock);
	}

	if (h->heap_pgalloc)
		paddr = page_to_phys(h->pgalloc.pages[pagenum]);
	else
		paddr = h->carveout->base + pagenum * PAGE_SIZE;

	if (h->flags != NVMAP_HANDLE_UNCACHEABLE &&
	    h->flags != NVMAP_HANDLE_WRITE_COMBINE) {
		dmac_flush_range(addr, addr + PAGE_SIZE);
		outer_flush_range(paddr, paddr + PAGE_SIZE);
	}

	pte = nvmap_vaddr_to_pte(nvmap_dev, (unsigned long)addr);
	nvmap_free_pte(nvmap_dev, pte);
	nvmap_handle_put(h);
}
Example #4
void *nvmap_kmap(struct nvmap_handle_ref *ref, unsigned int pagenum)
{
	struct nvmap_handle *h;
	phys_addr_t paddr;
	unsigned long kaddr;
	pgprot_t prot;
	pte_t **pte;

	BUG_ON(!ref);
	h = nvmap_handle_get(ref->handle);
	if (!h)
		return NULL;

	BUG_ON(pagenum >= h->size >> PAGE_SHIFT);
	prot = nvmap_pgprot(h, pgprot_kernel);
	pte = nvmap_alloc_pte(nvmap_dev, (void **)&kaddr);
	if (!pte)
		goto out;

	if (h->heap_pgalloc)
		paddr = page_to_phys(h->pgalloc.pages[pagenum]);
	else
		paddr = h->carveout->base + pagenum * PAGE_SIZE;

	set_pte_at(&init_mm, kaddr, *pte,
				pfn_pte(__phys_to_pfn(paddr), prot));
	flush_tlb_kernel_page(kaddr);
	return (void *)kaddr;
out:
	nvmap_handle_put(ref->handle);
	return NULL;
}
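A small usage sketch pairing nvmap_kmap() with the nvmap_kunmap() shown earlier; the page index and the memset() access are illustrative.

/* Usage sketch: temporarily map one page of a handle into kernel space,
 * touch it from the CPU, then unmap (nvmap_kunmap() also performs cache
 * maintenance and drops the reference taken by nvmap_kmap()). */
static void kmap_page_sketch(struct nvmap_handle_ref *ref)
{
	void *va = nvmap_kmap(ref, 0);		/* map page 0 of the handle */

	if (!va)
		return;
	memset(va, 0, PAGE_SIZE);		/* CPU access through the mapping */
	nvmap_kunmap(ref, 0, va);
}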
Example #5
/* doesn't need to be called inside nvmap_pin_lock, since this will only
 * expand the available VM area */
static int handle_unpin(struct nvmap_client *client, struct nvmap_handle *h)
{
	int ret = 0;

	nvmap_mru_lock(client->share);

	if (atomic_read(&h->pin) == 0) {
		nvmap_err(client, "%s unpinning unpinned handle %p\n",
			  current->group_leader->comm, h);
		nvmap_mru_unlock(client->share);
		return 0;
	}

	BUG_ON(!h->alloc);

	if (!atomic_dec_return(&h->pin)) {
		if (h->heap_pgalloc && h->pgalloc.area) {
			/* if a secure handle is clean (i.e., mapped into
			 * IOVMM), it needs to be zapped on unpin. */
			if (h->secure && !h->pgalloc.dirty) {
				tegra_iovmm_zap_vm(h->pgalloc.area);
				h->pgalloc.dirty = true;
			}
			nvmap_mru_insert_locked(client->share, h);
			ret = 1;
		}
	}

	nvmap_mru_unlock(client->share);

	nvmap_handle_put(h);
	return ret;
}
Example #6
phys_addr_t nvmap_pin(struct nvmap_client *client,
			struct nvmap_handle_ref *ref)
{
	struct nvmap_handle *h;
	phys_addr_t phys;
	int ret = 0;

	h = nvmap_handle_get(ref->handle);
	if (WARN_ON(!h))
		return -EINVAL;

	atomic_inc(&ref->pin);

	if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) {
		ret = -EINTR;
	} else {
		ret = wait_pin_array_locked(client, &h, 1);
		mutex_unlock(&client->share->pin_lock);
	}

	if (ret) {
		atomic_dec(&ref->pin);
		nvmap_handle_put(h);
	} else {
		if (h->heap_pgalloc && h->pgalloc.dirty)
			map_iovmm_area(h);
		phys = handle_phys(h);
	}

	return ret ?: phys;
}
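A hypothetical usage sketch for nvmap_pin(): obtain a device-visible address, program hardware with it, then unpin. nvmap_unpin() is an assumed counterpart that does not appear in this listing; the error check mirrors the overlay code further down.

/* Hypothetical pin/use/unpin sketch, under the assumptions stated above. */
static int pin_use_unpin_sketch(struct nvmap_client *client,
				struct nvmap_handle_ref *ref)
{
	phys_addr_t pa = nvmap_pin(client, ref);

	/* nvmap_pin() encodes a negative errno in the returned address on
	 * failure; the overlay code below checks it the same way */
	if (IS_ERR((void *)pa))
		return PTR_ERR((void *)pa);

	/* ... write 'pa' into a DMA descriptor or device register ... */

	nvmap_unpin(client, ref);	/* assumed counterpart */
	return 0;
}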
Example #7
/* stores the physical address (+offset) of each handle relocation entry
 * into its output location. see nvmap_pin_array for more details.
 *
 * each entry in arr (i.e., each relocation request) specifies two handles:
 * the handle to pin (pin), and the handle where the address of pin should be
 * written (patch). in pseudocode, this loop basically looks like:
 *
 * for (i = 0; i < nr; i++) {
 *     (pin, pin_offset, patch, patch_offset) = arr[i];
 *     patch[patch_offset] = address_of(pin) + pin_offset;
 * }
 */
static int nvmap_reloc_pin_array(struct nvmap_client *client,
				 const struct nvmap_pinarray_elem *arr,
				 int nr, struct nvmap_handle *gather)
{
	struct nvmap_handle *last_patch = NULL;
	unsigned int last_pfn = 0;
	pte_t **pte;
	void *addr;
	int i;

	pte = nvmap_alloc_pte(client->dev, &addr);
	if (IS_ERR(pte))
		return PTR_ERR(pte);

	for (i = 0; i < nr; i++) {
		struct nvmap_handle *patch;
		struct nvmap_handle *pin;
		phys_addr_t reloc_addr;
		phys_addr_t phys;
		unsigned int pfn;

		/* all of the handles were validated and referenced before
		 * this function is called, so the cast is safe here */
		pin = (struct nvmap_handle *)arr[i].pin_mem;

		if (arr[i].patch_mem == (unsigned long)last_patch) {
			patch = last_patch;
		} else if (arr[i].patch_mem == (unsigned long)gather) {
			patch = gather;
		} else {
			if (last_patch)
				nvmap_handle_put(last_patch);

			patch = nvmap_get_handle_id(client, arr[i].patch_mem);
			if (!patch) {
				nvmap_free_pte(client->dev, pte);
				return -EPERM;
			}
			last_patch = patch;
		}

		if (!patch) {
			nvmap_free_pte(client->dev, pte);
			return -EPERM;
		}

		if (patch->heap_pgalloc) {
			unsigned int page = arr[i].patch_offset >> PAGE_SHIFT;
			phys = page_to_phys(patch->pgalloc.pages[page]);
			phys += (arr[i].patch_offset & ~PAGE_MASK);
		} else {
Example #8
phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id)
{
	struct nvmap_handle *h;
	phys_addr_t phys;

	h = nvmap_get_handle_id(c, id);
	if (!h)
		return -EPERM;
	mutex_lock(&h->lock);
	phys = handle_phys(h);
	mutex_unlock(&h->lock);
	nvmap_handle_put(h);

	return phys;
}
struct nvmap_handle_ref *nvmap_create_handle_from_fd(
			struct nvmap_client *client, int fd)
{
	struct nvmap_handle *handle;
	struct nvmap_handle_ref *ref;

	BUG_ON(!client);

	handle = nvmap_handle_get_from_dmabuf_fd(client, fd);
	if (IS_ERR(handle))
		return ERR_CAST(handle);
	ref = nvmap_duplicate_handle(client, handle, 1);
	nvmap_handle_put(handle);
	return ref;
}
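A hypothetical sketch of importing a dma-buf fd through the helper above and releasing the duplicate with nvmap_free(), the same release call the overlay code further down uses.

/* Hypothetical import sketch, under the assumptions stated above. */
static int import_fd_sketch(struct nvmap_client *client, int fd)
{
	struct nvmap_handle_ref *ref;

	ref = nvmap_create_handle_from_fd(client, fd);
	if (IS_ERR(ref))
		return PTR_ERR(ref);

	/* ... use the buffer, e.g. pin or kmap it ... */

	nvmap_free(client, ref);	/* drop the duplicate when done */
	return 0;
}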
Example #10
/*
 * Pin handle without slow validation step
 */
phys_addr_t _nvmap_pin(struct nvmap_client *client,
			struct nvmap_handle_ref *ref)
{
	int ret = 0;
	struct nvmap_handle *h;
	phys_addr_t phys;

	if (!ref)
		return -EINVAL;

	h = ref->handle;

	if (WARN_ON(!h))
		return -EINVAL;

	h = nvmap_handle_get(h);
	if (WARN_ON(!h))
		return -EINVAL;

	atomic_inc(&ref->pin);

	if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) {
		ret = -EINTR;
	} else {
		ret = wait_pin_array_locked(client, &h, 1);
		mutex_unlock(&client->share->pin_lock);
	}

	if (ret) {
		goto err_out;
	} else {
		if (h->heap_pgalloc && h->pgalloc.dirty)
			ret = map_iovmm_area(h);
		if (ret)
			goto err_out_unpin;
		phys = handle_phys(h);
	}

	return phys;

err_out_unpin:
	/* take an extra reference: handle_unpin() drops one when it returns */
	nvmap_handle_get(h);
	handle_unpin(client, h, true);
err_out:
	atomic_dec(&ref->pin);
	nvmap_handle_put(h);
	return ret;
}
Example #11
/* doesn't need to be called inside nvmap_pin_lock, since this will only
 * expand the available VM area */
static int handle_unpin(struct nvmap_client *client,
		struct nvmap_handle *h, int free_vm)
{
	int ret = 0;
	nvmap_mru_lock(client->share);
#if defined(CONFIG_MACH_LGE)
	BUG_ON(!h);
#endif
	if (atomic_read(&h->pin) == 0) {
		trace_handle_unpin_error(client, h, atomic_read(&h->pin));
		nvmap_err(client, "%s unpinning unpinned handle %p\n",
			  current->group_leader->comm, h);
		nvmap_mru_unlock(client->share);
		return 0;
	}

	BUG_ON(!h->alloc);

	if (!atomic_dec_return(&h->pin)) {
		if (h->heap_pgalloc && h->pgalloc.area) {
			/* if a secure handle is clean (i.e., mapped into
			 * IOVMM), it needs to be zapped on unpin. */
			if (h->secure && !h->pgalloc.dirty) {
				tegra_iovmm_zap_vm(h->pgalloc.area);
				h->pgalloc.dirty = true;
			}
			if (free_vm) {
				tegra_iovmm_free_vm(h->pgalloc.area);
				h->pgalloc.area = NULL;
			} else
				nvmap_mru_insert_locked(client->share, h);
			ret = 1;
		}
	}

	trace_handle_unpin(client, h, atomic_read(&h->pin));
	nvmap_mru_unlock(client->share);
	nvmap_handle_put(h);
	return ret;
}
Example #12
static int handle_unpin_noref(struct nvmap_client *client, unsigned long id)
{
	struct nvmap_handle *h;
	int w;

	h = nvmap_validate_get(client, id);
	if (unlikely(!h)) {
		nvmap_err(client, "%s attempting to unpin invalid handle %p\n",
			  current->group_leader->comm, (void *)id);
		return 0;
	}

	nvmap_err(client, "%s unpinning unreferenced handle %p\n",
		  current->group_leader->comm, h);
	WARN_ON(1);

	w = handle_unpin(client, h, false);
	nvmap_handle_put(h);
	return w;
}
Example #13
/* Overlay window manipulation */
static int tegra_overlay_pin_window(struct tegra_overlay_info *overlay,
				    struct tegra_overlay_flip_win *flip_win,
				    struct nvmap_client *user_nvmap)
{
	struct nvmap_handle_ref *win_dupe;
	struct nvmap_handle *win_handle;
	unsigned long buff_id = flip_win->attr.buff_id;

	if (!buff_id)
		return 0;

	win_handle = nvmap_get_handle_id(user_nvmap, buff_id);
	if (win_handle == NULL) {
		dev_err(&overlay->ndev->dev, "%s: flip invalid "
			"handle %08lx\n", current->comm, buff_id);
		return -EPERM;
	}

	/* duplicate the new framebuffer's handle into the fb driver's
	 * nvmap context, to ensure that the handle won't be freed as
	 * long as it is in-use by the fb driver */
	win_dupe = nvmap_duplicate_handle_id(overlay->overlay_nvmap, buff_id);
	nvmap_handle_put(win_handle);

	if (IS_ERR(win_dupe)) {
		dev_err(&overlay->ndev->dev, "couldn't duplicate handle\n");
		return PTR_ERR(win_dupe);
	}

	flip_win->handle = win_dupe;

	flip_win->phys_addr = nvmap_pin(overlay->overlay_nvmap, win_dupe);
	if (IS_ERR((void *)flip_win->phys_addr)) {
		dev_err(&overlay->ndev->dev, "couldn't pin handle\n");
		nvmap_free(overlay->overlay_nvmap, win_dupe);
		return PTR_ERR((void *)flip_win->phys_addr);
	}

	return 0;
}
Example #14
void *nvmap_mmap(struct nvmap_handle_ref *ref)
{
	struct nvmap_handle *h;
	pgprot_t prot;
	unsigned long adj_size;
	unsigned long offs;
	struct vm_struct *v;
	void *p;

	h = nvmap_handle_get(ref->handle);
	if (!h)
		return NULL;

	prot = nvmap_pgprot(h, pgprot_kernel);

	if (h->heap_pgalloc)
		return vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT,
				  -1, prot);

	/* carveout - explicitly map the pfns into a vmalloc area */

	nvmap_usecount_inc(h);

	adj_size = h->carveout->base & ~PAGE_MASK;
	adj_size += h->size;
	adj_size = PAGE_ALIGN(adj_size);

	v = alloc_vm_area(adj_size);
	if (!v) {
		nvmap_usecount_dec(h);
		nvmap_handle_put(h);
		return NULL;
	}

	p = v->addr + (h->carveout->base & ~PAGE_MASK);

	for (offs = 0; offs < adj_size; offs += PAGE_SIZE) {
		unsigned long addr = (unsigned long) v->addr + offs;
		unsigned int pfn;
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pfn = __phys_to_pfn(h->carveout->base + offs);
		pgd = pgd_offset_k(addr);
		pud = pud_alloc(&init_mm, pgd, addr);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, addr);
		if (!pmd)
			break;
		pte = pte_alloc_kernel(pmd, addr);
		if (!pte)
			break;
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		flush_tlb_kernel_page(addr);
	}

	if (offs != adj_size) {
		free_vm_area(v);
		nvmap_usecount_dec(h);
		nvmap_handle_put(h);
		return NULL;
	}

	/* leave the handle ref count incremented by 1, so that
	 * the handle will not be freed while the kernel mapping exists.
	 * nvmap_handle_put will be called by unmapping this address */
	return p;
}
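A usage sketch for nvmap_mmap(); nvmap_munmap() is an assumed counterpart that drops the reference the comment above says is left held by the mapping.

/* Hypothetical map/use/unmap sketch, under the assumptions stated above. */
static void mmap_use_sketch(struct nvmap_handle_ref *ref)
{
	void *va = nvmap_mmap(ref);

	if (!va)
		return;

	/* ... CPU access to the whole buffer through 'va' ... */

	nvmap_munmap(ref, va);		/* assumed counterpart */
}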
Example #15
static int nvmap_validate_get_pin_array(struct nvmap_client *client,
				unsigned long *h,
				unsigned long id_type_mask,
				unsigned long id_type,
				int nr,
				struct nvmap_handle **unique_handles,
				struct nvmap_handle_ref **unique_handle_refs)
{
	int i;
	int err = 0;
	int count = 0;
	unsigned long last_h = 0;
	struct nvmap_handle_ref *last_ref = NULL;

	nvmap_ref_lock(client);

	for (i = 0; i < nr; i++) {
		struct nvmap_handle_ref *ref;

		if ((h[i] & id_type_mask) != id_type)
			continue;

		if (last_h == h[i])
			continue;

		ref = _nvmap_validate_id_locked(client, h[i]);

		if (!ref)
			nvmap_err(client, "failed to validate id\n");
		else if (!ref->handle)
			nvmap_err(client, "id had no associated handle\n");
		else if (!ref->handle->alloc)
			nvmap_err(client, "handle had no allocation\n");

		if (!ref || !ref->handle || !ref->handle->alloc) {
			err = -EPERM;
			break;
		}

		last_h = h[i];
		last_ref = ref;
		/* a handle may be referenced multiple times in arr, but
		 * it will only be pinned once; this ensures that the
		 * minimum number of sync-queue slots in the host driver
		 * are dedicated to storing unpin lists, which allows
		 * for greater parallelism between the CPU and graphics
		 * processor */
		if (ref->handle->flags & NVMAP_HANDLE_VISITED)
			continue;

		ref->handle->flags |= NVMAP_HANDLE_VISITED;

		unique_handles[count] = nvmap_handle_get(ref->handle);

		/* Duplicate handle */
		atomic_inc(&ref->dupes);
		nvmap_handle_get(ref->handle);
		unique_handle_refs[count] = ref;

		BUG_ON(!unique_handles[count]);
		count++;
	}

	nvmap_ref_unlock(client);

	if (err) {
		for (i = 0; i < count; i++) {
			unique_handles[i]->flags &= ~NVMAP_HANDLE_VISITED;
			/* drop the reference taken for pinning */
			nvmap_handle_put(unique_handles[i]);
			/* undo the duplicate taken during validation */
			atomic_dec(&unique_handle_refs[i]->dupes);
			nvmap_handle_put(unique_handles[i]);
		}
	}

	return err ? err : count;
}
Example #16
/* pins a list of handle_ref objects; same conditions apply as to
 * _nvmap_handle_pin, but also bumps the pin count of each handle_ref. */
int nvmap_pin_ids(struct nvmap_client *client,
		  unsigned int nr, const unsigned long *ids)
{
	int ret = 0;
	unsigned int i;
	struct nvmap_handle **h = (struct nvmap_handle **)ids;
	struct nvmap_handle_ref *ref;

	/* to optimize for the common case (client provided valid handle
	 * references and the pin succeeds), increment the handle_ref pin
	 * count during validation. in error cases, the tree will need to
	 * be re-walked, since the handle_ref is discarded so that an
	 * allocation isn't required. if a handle_ref is not found,
	 * locally validate that the caller has permission to pin the handle;
	 * handle_refs are not created in this case, so it is possible that
	 * if the caller crashes after pinning a global handle, the handle
	 * will be permanently leaked. */
	nvmap_ref_lock(client);
	for (i = 0; i < nr; i++) {
		ref = _nvmap_validate_id_locked(client, ids[i]);
		if (ref) {
			atomic_inc(&ref->pin);
			nvmap_handle_get(h[i]);
		} else {
			struct nvmap_handle *verify;
			nvmap_ref_unlock(client);
			verify = nvmap_validate_get(client, ids[i]);
			if (verify) {
				nvmap_warn(client, "%s pinning unreferenced "
					   "handle %p\n",
					   current->group_leader->comm, h[i]);
			} else {
				ret = -EPERM;
				nr = i;
				break;
			}
			nvmap_ref_lock(client);
		}
		if (!h[i]->alloc) {
			ret = -EFAULT;
			nr = i + 1;
			break;
		}
	}
	nvmap_ref_unlock(client);

	if (ret)
		goto out;

	ret = mutex_lock_interruptible(&client->share->pin_lock);
	if (WARN_ON(ret))
		goto out;

	ret = wait_pin_array_locked(client, h, nr);

	mutex_unlock(&client->share->pin_lock);

	if (ret) {
		ret = -EINTR;
	} else {
		for (i = 0; i < nr; i++) {
			if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
				map_iovmm_area(h[i]);
		}
	}

out:
	if (ret) {
		nvmap_ref_lock(client);
		for (i = 0; i < nr; i++) {
			if (!ids[i])
				continue;

			ref = _nvmap_validate_id_locked(client, ids[i]);
			if (!ref) {
				nvmap_warn(client, "%s freed handle %p "
					   "during pinning\n",
					   current->group_leader->comm,
					   (void *)ids[i]);
				continue;
			}
			atomic_dec(&ref->pin);
		}
		nvmap_ref_unlock(client);

		for (i = 0; i < nr; i++)
			if (h[i])
				nvmap_handle_put(h[i]);
	}

	return ret;
}
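A hypothetical caller sketch for nvmap_pin_ids(); nvmap_unpin_ids() is an assumed counterpart that does not appear in this listing.

/* Hypothetical batch pin/unpin sketch, under the assumptions stated above. */
static int pin_ids_sketch(struct nvmap_client *client,
			  unsigned int nr, const unsigned long *ids)
{
	int err = nvmap_pin_ids(client, nr, ids);

	if (err)
		return err;

	/* ... the buffers stay pinned while queued work uses them ... */

	nvmap_unpin_ids(client, nr, ids);	/* assumed counterpart */
	return 0;
}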