Example #1
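nvmap_kmap() creates a temporary kernel mapping for a single page of an nvmap handle: it takes a reference on the handle, reserves a kernel virtual address and PTE slot, resolves the page's physical address, installs the PTE, and returns the kernel virtual address. The reference is dropped only on failure; on success it stays held until the mapping is torn down.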
void *nvmap_kmap(struct nvmap_handle_ref *ref, unsigned int pagenum)
{
	struct nvmap_handle *h;
	phys_addr_t paddr;
	unsigned long kaddr;
	pgprot_t prot;
	pte_t **pte;

	/* take a reference on the underlying handle so it cannot be
	 * freed while the temporary mapping exists */
	BUG_ON(!ref);
	h = nvmap_handle_get(ref->handle);
	if (!h)
		return NULL;

	BUG_ON(pagenum >= h->size >> PAGE_SHIFT);
	prot = nvmap_pgprot(h, pgprot_kernel);
	/* reserve a kernel virtual address and the PTE slot backing it */
	pte = nvmap_alloc_pte(nvmap_dev, (void **)&kaddr);
	if (!pte)
		goto out;

	/* resolve the physical address of the requested page: either from
	 * the handle's page array or from its contiguous carveout */
	if (h->heap_pgalloc)
		paddr = page_to_phys(h->pgalloc.pages[pagenum]);
	else
		paddr = h->carveout->base + pagenum * PAGE_SIZE;

	/* install the PTE and flush any stale TLB entry for the address;
	 * the handle reference stays held until the mapping is undone */
	set_pte_at(&init_mm, kaddr, *pte,
				pfn_pte(__phys_to_pfn(paddr), prot));
	flush_tlb_kernel_page(kaddr);
	return (void *)kaddr;
out:
	/* failure: drop the reference taken at the top of the function */
	nvmap_handle_put(ref->handle);
	return NULL;
}
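A minimal caller sketch for the example above. The nvmap_kunmap() counterpart and its signature are assumptions here (mirroring the alloc/put pairing in nvmap_kmap()), not something shown in this listing:

/* hypothetical usage: map page 0 of a handle, zero it, unmap it */
static int nvmap_zero_first_page(struct nvmap_handle_ref *ref)
{
	void *vaddr = nvmap_kmap(ref, 0);

	if (!vaddr)
		return -ENOMEM;

	/* write through the temporary kernel mapping */
	memset(vaddr, 0, PAGE_SIZE);

	/* assumed counterpart: releases the PTE and drops the handle
	 * reference that nvmap_kmap() left held */
	nvmap_kunmap(ref, 0, vaddr);
	return 0;
}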
Example #2
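nvmap_mmap() maps an entire handle into kernel virtual address space. Page-allocated handles go through vm_map_ram(); carveout handles get a vm_struct reservation whose page tables are populated by hand, one PTE at a time. On success the handle reference is deliberately left held so the backing memory outlives the mapping.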
void *nvmap_mmap(struct nvmap_handle_ref *ref)
{
	struct nvmap_handle *h;
	pgprot_t prot;
	unsigned long adj_size;
	unsigned long offs;
	struct vm_struct *v;
	void *p;

	h = nvmap_handle_get(ref->handle);
	if (!h)
		return NULL;

	prot = nvmap_pgprot(h, pgprot_kernel);

	if (h->heap_pgalloc) {
		p = vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT,
			       -1, prot);
		/* don't leak the reference taken above if the map failed */
		if (!p)
			nvmap_handle_put(h);
		return p;
	}

	/* carveout - explicitly map the pfns into a vmalloc area */

	/* mark the carveout handle busy while the kernel mapping exists */
	nvmap_usecount_inc(h);

	/* the carveout base may not be page aligned; grow the mapping so
	 * it covers the sub-page offset as well */
	adj_size = h->carveout->base & ~PAGE_MASK;
	adj_size += h->size;
	adj_size = PAGE_ALIGN(adj_size);

	v = alloc_vm_area(adj_size);
	if (!v) {
		nvmap_usecount_dec(h);
		nvmap_handle_put(h);
		return NULL;
	}

	/* return a pointer at the same sub-page offset as the carveout base */
	p = v->addr + (h->carveout->base & ~PAGE_MASK);

	for (offs = 0; offs < adj_size; offs += PAGE_SIZE) {
		unsigned long addr = (unsigned long) v->addr + offs;
		unsigned int pfn;
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pfn = __phys_to_pfn(h->carveout->base + offs);
		/* walk the kernel page tables, allocating any missing
		 * intermediate levels, down to the PTE for this address */
		pgd = pgd_offset_k(addr);
		pud = pud_alloc(&init_mm, pgd, addr);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, addr);
		if (!pmd)
			break;
		pte = pte_alloc_kernel(pmd, addr);
		if (!pte)
			break;
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		flush_tlb_kernel_page(addr);
	}

	/* a page-table allocation failed part-way through; unwind */
	if (offs != adj_size) {
		free_vm_area(v);
		nvmap_usecount_dec(h);
		nvmap_handle_put(h);
		return NULL;
	}

	/* leave the handle ref count incremented by 1 so that the handle
	 * will not be freed while the kernel mapping exists;
	 * nvmap_handle_put() is called when this address is unmapped */
	return p;
}
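A matching teardown sketch for nvmap_mmap(). The nvmap_munmap() name and signature are assumptions (the comment above only promises that nvmap_handle_put() runs when the address is unmapped):

/* hypothetical usage: map a handle, dump its first bytes, unmap it */
static void nvmap_dump_handle(struct nvmap_handle_ref *ref)
{
	void *p = nvmap_mmap(ref);

	if (!p)
		return;

	print_hex_dump_bytes("nvmap: ", DUMP_PREFIX_OFFSET, p, 64);

	/* assumed counterpart: tears down the mapping and drops the
	 * handle reference that nvmap_mmap() left held */
	nvmap_munmap(ref, p);
}

Example #3

nvmap_handle_iovmm_locked() obtains a tegra_iovmm_area for a handle, reclaiming space from recently unpinned handles when the IOVMM address space is full. The eviction strategy is spelled out in the comment below.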
/* returns a tegra_iovmm_area for a handle. if the handle already has
 * an iovmm_area allocated, the handle is simply removed from its MRU list
 * and the existing iovmm_area is returned.
 *
 * if the handle has no existing allocation, try to allocate a new IOVMM
 * area.
 *
 * if a new area cannot be allocated, try to re-use the
 * most-recently-unpinned handle's allocation.
 *
 * if that also fails, iteratively evict handles from the MRU lists and
 * free their allocations until the new allocation succeeds.
 */
struct tegra_iovmm_area *nvmap_handle_iovmm_locked(struct nvmap_client *c,
					    struct nvmap_handle *h)
{
	struct list_head *mru;
	struct nvmap_handle *evict = NULL;
	struct tegra_iovmm_area *vm = NULL;
	unsigned int i, idx;
	pgprot_t prot;

	BUG_ON(!h || !c || !c->share);

	prot = nvmap_pgprot(h, pgprot_kernel);

	/* fast path: the handle already owns an area. Remove it from the
	 * MRU list so it can no longer be picked for eviction */
	if (h->pgalloc.area) {
		BUG_ON(list_empty(&h->pgalloc.mru_list));
		list_del(&h->pgalloc.mru_list);
		INIT_LIST_HEAD(&h->pgalloc.mru_list);
		return h->pgalloc.area;
	}

	/* no existing area: first try a fresh allocation */
	vm = tegra_iovmm_create_vm(c->share->iovmm, NULL, h->size, prot);

	if (vm) {
		INIT_LIST_HEAD(&h->pgalloc.mru_list);
		return vm;
	}
	/* attempt to re-use the most recently unpinned IOVMM area in the
	 * same size bin as the current handle. If that fails, iteratively
	 * evict handles (starting from the current bin) until an allocation
	 * succeeds or no more areas can be evicted */
	mru = mru_list(c->share, h->size);
	if (!list_empty(mru))
		evict = list_first_entry(mru, struct nvmap_handle,
					 pgalloc.mru_list);

	if (evict && evict->pgalloc.area->iovm_length >= h->size) {
		list_del(&evict->pgalloc.mru_list);
		vm = evict->pgalloc.area;
		evict->pgalloc.area = NULL;
		INIT_LIST_HEAD(&evict->pgalloc.mru_list);
		return vm;
	}

	/* walk every size bin, starting with this handle's, evicting
	 * unpinned handles until an allocation succeeds or the MRU lists
	 * are exhausted */
	idx = mru - c->share->mru_lists;

	for (i = 0; i < c->share->nr_mru && !vm; i++, idx++) {
		if (idx >= c->share->nr_mru)
			idx = 0;
		mru = &c->share->mru_lists[idx];
		while (!list_empty(mru) && !vm) {
			evict = list_first_entry(mru, struct nvmap_handle,
						 pgalloc.mru_list);

			BUG_ON(atomic_read(&evict->pin) != 0);
			BUG_ON(!evict->pgalloc.area);
			list_del(&evict->pgalloc.mru_list);
			INIT_LIST_HEAD(&evict->pgalloc.mru_list);
			tegra_iovmm_free_vm(evict->pgalloc.area);
			evict->pgalloc.area = NULL;
			vm = tegra_iovmm_create_vm(c->share->iovmm,
						   NULL, h->size, prot);
		}
	}
	return vm;
}
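The _locked suffix suggests the caller holds the share's IOVMM lock across this call (an inference from the naming convention; the lock itself does not appear in this excerpt). Eviction only considers handles sitting on an MRU list, and handles are only queued there while unpinned, which is what the BUG_ON(atomic_read(&evict->pin) != 0) assertion enforces.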