Example no. 1
void nvmap_kunmap(struct nvmap_handle_ref *ref, unsigned int pagenum,
		  void *addr)
{
	struct nvmap_handle *h;
	phys_addr_t paddr;
	pte_t **pte;

	BUG_ON(!addr || !ref);
	h = ref->handle;

	if (nvmap_find_cache_maint_op(h->dev, h)) {
		struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
		/* acquire pin lock to ensure maintenance is done before
		 * handle is pinned */
		mutex_lock(&share->pin_lock);
		nvmap_cache_maint_ops_flush(h->dev, h);
		mutex_unlock(&share->pin_lock);
	}

	if (h->heap_pgalloc)
		paddr = page_to_phys(h->pgalloc.pages[pagenum]);
	else
		paddr = h->carveout->base + pagenum * PAGE_SIZE;

	if (h->flags != NVMAP_HANDLE_UNCACHEABLE &&
	    h->flags != NVMAP_HANDLE_WRITE_COMBINE) {
		/* cacheable mapping: write back and invalidate the inner
		 * and outer cache lines for this page before the kernel
		 * mapping is torn down */
		dmac_flush_range(addr, addr + PAGE_SIZE);
		outer_flush_range(paddr, paddr + PAGE_SIZE);
	}

	pte = nvmap_vaddr_to_pte(nvmap_dev, (unsigned long)addr);
	nvmap_free_pte(nvmap_dev, pte);
	nvmap_handle_put(h);
}
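For context, a minimal usage sketch of the mapping side this function balances. nvmap_kmap() is assumed to exist in the same driver and to return a kernel virtual address for one page of the handle (NULL on failure); that name and behaviour are assumptions, not taken from the listing above.

/* Hedged sketch: pair every successful nvmap_kmap() with an
 * nvmap_kunmap() on the same page and address. nvmap_kmap() is an
 * assumed companion API, not shown in this listing. */
void *addr = nvmap_kmap(ref, pagenum);
if (addr) {
	memset(addr, 0, PAGE_SIZE);		/* touch the mapped page */
	nvmap_kunmap(ref, pagenum, addr);	/* flush caches, free the PTE */
}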
Example no. 2
static int nvmap_page_pool_get_unused_pages(void)
{
	unsigned int i;
	int total = 0;
	struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev);

	for (i = 0; i < NVMAP_NUM_POOLS; i++)
		total += nvmap_page_pool_get_available_count(&share->pools[i]);

	return total;
}
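For reference, a hypothetical sketch of the per-pool counter this loop depends on, since nvmap_page_pool_get_available_count() is not shown here; the field names lock and npages are assumptions.

/* Hypothetical sketch: `lock` and `npages` are assumed field names.
 * Take the pool lock so the snapshot is consistent with concurrent
 * allocations and frees on that pool. */
static int nvmap_page_pool_get_available_count(struct nvmap_page_pool *pool)
{
	int count;

	mutex_lock(&pool->lock);
	count = pool->npages;
	mutex_unlock(&pool->lock);

	return count;
}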
Example no. 3
void nvmap_munmap(struct nvmap_handle_ref *ref, void *addr)
{
	struct nvmap_handle *h;

	if (!ref)
		return;

	h = ref->handle;

	if (nvmap_find_cache_maint_op(h->dev, h)) {
		struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
		/* acquire pin lock to ensure maintenance is done before
		 * handle is pinned */
		mutex_lock(&share->pin_lock);
		nvmap_cache_maint_ops_flush(h->dev, h);
		mutex_unlock(&share->pin_lock);
	}

	/* Handle can be locked by cache maintenance in a
	 * separate thread */
	if (h->heap_pgalloc) {
		vm_unmap_ram(addr, h->size >> PAGE_SHIFT);
	} else {
		struct vm_struct *vm;

		addr -= (h->carveout->base & ~PAGE_MASK);
		vm = remove_vm_area(addr);
		BUG_ON(!vm);
		kfree(vm);
	}

	nvmap_handle_put(h);
}
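For symmetry, a sketch of the mapping this function tears down for page-allocated handles. The heap_pgalloc mapping is assumed to come from vm_map_ram() over the handle's page array, with a protection value the driver derives from the handle's cache flags; prot here is a placeholder.

/* Sketch under assumptions: the heap_pgalloc mapping was created with
 * vm_map_ram(); `prot` is a placeholder for whatever protection the
 * driver actually derives from the handle's flags. */
pgprot_t prot = pgprot_kernel;
void *addr = vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT, -1, prot);

/* ... access the buffer through addr ..., then: */
nvmap_munmap(ref, addr);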
Example no. 4
static int nvmap_page_pool_shrink(struct shrinker *shrinker,
				  struct shrink_control *sc)
{
	unsigned int i;
	unsigned int pool_offset;
	struct nvmap_page_pool *pool;
	int shrink_pages = sc->nr_to_scan;
	static atomic_t start_pool = ATOMIC_INIT(-1);
	struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev);

	if (!shrink_pages)
		goto out;

	pr_debug("sh_pages=%d", shrink_pages);

	for (i = 0; i < NVMAP_NUM_POOLS && shrink_pages; i++) {
		/* rotate the starting pool so repeated shrink calls
		 * don't always drain pool 0 first */
		pool_offset = atomic_add_return(1, &start_pool) %
				NVMAP_NUM_POOLS;
		pool = &share->pools[pool_offset];
		shrink_pages = nvmap_page_pool_free(pool, shrink_pages);
	}
out:
	return nvmap_page_pool_get_unused_pages();
}
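For completeness, a sketch of how a callback with this signature is wired up under the pre-3.12 shrinker API, where a single .shrink hook serves both the count path (nr_to_scan == 0) and the scan path; the variable name and registration site are assumptions.

/* Sketch: registering the shrinker. DEFAULT_SEEKS is the usual cost
 * hint; the unused-page total returned above is what the VM sees on
 * the count path. */
static struct shrinker nvmap_page_pool_shrinker = {
	.shrink = nvmap_page_pool_shrink,
	.seeks = DEFAULT_SEEKS,
};

/* called once from driver init: */
register_shrinker(&nvmap_page_pool_shrinker);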