STATIC int balong_ion_free_mem_to_buddy(void)
{
    int i;
    u32 fb_heap_phy = 0;
    struct ion_heap_info_data mem_data;

    if (0 != hisi_ion_get_heap_info(ION_FB_HEAP_ID, &mem_data)) {
        balongfb_loge("fail to get ION_FB_HEAP_ID\n");
        return -EINVAL;
    }

    if (0 == mem_data.heap_size) {
        balongfb_loge("fb reserved size 0\n");
        return -EINVAL;
    }

    fb_heap_phy = mem_data.heap_phy;
    for (i = 0; i < (mem_data.heap_size / PAGE_SIZE); i++) {
        free_reserved_page(phys_to_page(mem_data.heap_phy));
#ifdef CONFIG_HIGHMEM
        if (PageHighMem(phys_to_page(mem_data.heap_phy)))
            totalhigh_pages += 1;
#endif
        mem_data.heap_phy += PAGE_SIZE;
    }

    memblock_free(fb_heap_phy, mem_data.heap_size);
    return 0;
}
static void bitfix_xor_page(phys_addr_t page_addr, u32 dest_cu)
{
	phys_addr_t dest_page_addr = (page_addr & ~CU_MASK) |
		(dest_cu << CU_OFFSET);
	u32 *virt_page = kmap_atomic(phys_to_page(page_addr));
	u32 *virt_dest_page = kmap_atomic(phys_to_page(dest_page_addr));

	BUG_ON(page_addr & ~PAGE_MASK);
	BUG_ON(dest_page_addr == page_addr);

	bitfix_xor32(virt_dest_page, virt_page, PAGE_SIZE);

	kunmap_atomic(virt_dest_page);
	kunmap_atomic(virt_page);
}
Example 3
void *cma_map_kernel(u32 phys_addr, size_t size)
{
	pgprot_t prot = __get_dma_pgprot(NULL, pgprot_kernel);
	struct page *page = phys_to_page(phys_addr);
	void *ptr = NULL;

	if (unlikely(phys_addr < mem_start || phys_addr > mem_start + mem_size)) {
		pr_err("%s(%d) err: phys_addr 0x%x invalid!\n", __func__, __LINE__, phys_addr);
		return NULL;
	}
//	BUG_ON(unlikely(!pfn_valid(__phys_to_pfn(phys_addr))));

	size = PAGE_ALIGN(size);

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, __builtin_return_address(0));
		if (!ptr) {
			pr_err("%s(%d) err: __dma_alloc_remap failed!\n", __func__, __LINE__);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

	return ptr;
}
static inline void *memdump_remap_type(unsigned long phys_addr, size_t size,
				       pgprot_t pgprot)
{
	int i;
	u8 *vaddr;
	int npages = PAGE_ALIGN((phys_addr & (PAGE_SIZE - 1)) + size) >> PAGE_SHIFT;
	unsigned long offset = phys_addr & (PAGE_SIZE - 1);
	struct page **pages;
	pages = vmalloc(sizeof(struct page *) * npages);
	if (!pages) {
		printk(KERN_ERR "%s: vmalloc return NULL!\n", __func__);
		return NULL;
	}
	pages[0] = phys_to_page(phys_addr);
	for (i = 0; i < npages - 1; i++) {
		pages[i + 1] = pages[i] + 1;
	}
	vaddr = (u8 *)vmap(pages, npages, VM_MAP, pgprot);
	if (!vaddr) {
		printk(KERN_ERR "%s: vmap return NULL!\n", __func__);
	} else {
		vaddr += offset;
	}
	vfree(pages);
	return (void *)vaddr;
}
int msm_iommu_map_extra(struct iommu_domain *domain,
				unsigned long start_iova,
				unsigned long size,
				unsigned long page_size,
				int cached)
{
	int ret = 0;
	int i = 0;
	unsigned long phy_addr = ALIGN(virt_to_phys(iommu_dummy), page_size);
	unsigned long temp_iova = start_iova;
	if (page_size == SZ_4K) {
		struct scatterlist *sglist;
		unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
		struct page *dummy_page = phys_to_page(phy_addr);

		sglist = kmalloc(sizeof(*sglist) * nrpages, GFP_KERNEL);
		if (!sglist) {
			ret = -ENOMEM;
			goto out;
		}

		sg_init_table(sglist, nrpages);

		for (i = 0; i < nrpages; i++)
			sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);

		ret = iommu_map_range(domain, temp_iova, sglist, size, cached);
		if (ret) {
			pr_err("%s: could not map extra %lx in domain %p\n",
				__func__, start_iova, domain);
		}

		kfree(sglist);
	} else {
Example 6
char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len)
{
	struct page *pg;
	unsigned long flags;
	unsigned char *map;
	unsigned char *ptr;

	/*
	 * On i386, a DMA handle is the "physical" address of a page.
	 * In other words, the bus address is equal to physical address.
	 * There is no IOMMU.
	 */
	pg = phys_to_page(dma_addr);

	/*
	 * We are called from hardware IRQs in case of callbacks.
	 * But we can be called from softirq or process context in case
	 * of submissions. In such case, we need to protect KM_IRQ0.
	 */
	local_irq_save(flags);
	map = kmap_atomic(pg, KM_IRQ0);
	ptr = map + (dma_addr & (PAGE_SIZE-1));
	memcpy(dst, ptr, len);
	kunmap_atomic(map, KM_IRQ0);
	local_irq_restore(flags);
	return 0;
}
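A hedged caller sketch for the routine above: the helper name and the use of an isochronous descriptor are illustrative only, but they show the contract the comment describes, where the dma_addr_t doubles as a physical address and mon_dmapeek() handles the atomic mapping and IRQ protection itself.

static void peek_isoc_packet(const struct urb *urb, int i, unsigned char *dst)
{
	const struct usb_iso_packet_descriptor *fp = &urb->iso_frame_desc[i];

	/* One call per packet; no kmap or IRQ handling needed in the caller. */
	mon_dmapeek(dst, urb->transfer_dma + fp->offset, fp->actual_length);
}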
Example 7
static int omap_tiler_alloc_dynamicpages(struct omap_tiler_info *info)
{
	int i;
	int ret;
	struct page *pg;

	for (i = 0; i < info->n_phys_pages; i++) {
		pg = alloc_page(GFP_KERNEL | GFP_DMA | GFP_HIGHUSER);
		if (!pg) {
			ret = -ENOMEM;
			pr_err("%s: alloc_page failed\n",
				__func__);
			goto err_page_alloc;
		}
		info->phys_addrs[i] = page_to_phys(pg);
		dmac_flush_range((void *)page_address(pg),
			(void *)page_address(pg) + PAGE_SIZE);
		outer_flush_range(info->phys_addrs[i],
			info->phys_addrs[i] + PAGE_SIZE);
	}
	return 0;

err_page_alloc:
	for (i -= 1; i >= 0; i--) {
		pg = phys_to_page(info->phys_addrs[i]);
		__free_page(pg);
	}
	return ret;
}
/**
 * Prepare for running bitfix.
 *
 * This will zero out bitfix memory in preparation for calling
 * bitfix_process_page() on pages.  It will also allocate some internal
 * temporary memory that will be freed with bitfix_finish.
 *
 * This should be called each time before suspend.
 *
 * This function must be called before bitfix_does_overlap_reserved().
 */
void bitfix_prepare(void)
{
	int i;

	if (!bitfix_enabled)
		return;

	/*
	 * Chunk size must match.  Set just in case someone was playing around
	 * with sysfs.
	 */
	s3c_pm_check_set_chunksize(CHUNK_SIZE);

	/*
	 * We'd like pm-check to give us chunks in an order such that we
	 * process all chunks with the same destination one right after another.
	 */
	s3c_pm_check_set_interleave_bytes(1 << CU_OFFSET);

	/* Zero out the xor superchunk. */
	for (i = 0; i < UPPER_LOOPS; i++) {
		phys_addr_t base_addr = SDRAM_BASE + (i << UPPER_OFFSET);
		phys_addr_t xor_superchunk_addr = base_addr +
			(XOR_CU_NUM << CU_OFFSET);
		u32 pgnum;

		for (pgnum = 0; pgnum < (PAGES_PER_SUPERCHUNK); pgnum++) {
			phys_addr_t addr = xor_superchunk_addr +
				(pgnum * PAGE_SIZE);
			void *virt = kmap_atomic(phys_to_page(addr));
			memset(virt, 0, PAGE_SIZE);
			kunmap_atomic(virt);
		}
	}
}
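A minimal sketch of the calling sequence the comment above implies, assuming the companion helpers bitfix_process_page() and bitfix_does_overlap_reserved() named there take a phys_addr_t as elsewhere in this listing; the wrapper name and the region arguments are hypothetical.

static void pm_check_save_region(phys_addr_t start, size_t size)
{
	phys_addr_t addr;

	/* Zero the xor superchunks before any page is folded in. */
	bitfix_prepare();

	for (addr = start; addr < start + size; addr += PAGE_SIZE) {
		/* Reserved bitfix memory is never processed... */
		if (bitfix_does_overlap_reserved(addr))
			continue;
		/* ...everything else gets xor-ed into its corruption unit. */
		bitfix_process_page(addr);
	}
}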
Example 9
void mon_dmapeek_vec(const struct mon_reader_bin *rp,
    unsigned int offset, dma_addr_t dma_addr, unsigned int length)
{
	unsigned long flags;
	unsigned int step_len;
	struct page *pg;
	unsigned char *map;
	unsigned long page_off, page_len;

	local_irq_save(flags);
	while (length) {
		/* compute number of bytes we are going to copy in this page */
		step_len = length;
		page_off = dma_addr & (PAGE_SIZE-1);
		page_len = PAGE_SIZE - page_off;
		if (page_len < step_len)
			step_len = page_len;

		/* copy data and advance pointers */
		pg = phys_to_page(dma_addr);
		map = kmap_atomic(pg, KM_IRQ0);
		offset = mon_copy_to_buff(rp, offset, map + page_off, step_len);
		kunmap_atomic(map, KM_IRQ0);
		dma_addr += step_len;
		length -= step_len;
	}
	local_irq_restore(flags);
}
static void *__alloc_from_pool(size_t size, struct page **ret_pages, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;
	int count = size >> PAGE_SHIFT;
	int i;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
		for (i = 0; i < count; i++) {
			ret_pages[i] = phys_to_page(phys);
			phys += 1 << PAGE_SHIFT;
		}
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}
Example 11
struct dma_pinned_list *dma_pin_kernel_iovec_pages(struct iovec *iov, size_t len)
{
	struct dma_pinned_list *local_list;
	struct page **pages;
	int i, j;
	int nr_iovecs = 0;
	int iovec_len_used = 0;
	int iovec_pages_used = 0;

	/* determine how many iovecs/pages there are, up front */
	do {
		iovec_len_used += iov[nr_iovecs].iov_len;
		iovec_pages_used += num_pages_spanned(&iov[nr_iovecs]);
		nr_iovecs++;
	} while (iovec_len_used < len);

	/* single kmalloc for pinned list, page_list[], and the page arrays */
	local_list = kmalloc(sizeof(*local_list)
		+ (nr_iovecs * sizeof (struct dma_page_list))
		+ (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
	if (!local_list)
		goto out;

	/* list of pages starts right after the page list array */
	pages = (struct page **) &local_list->page_list[nr_iovecs];

	local_list->nr_iovecs = 0;

	for (i = 0; i < nr_iovecs; i++) {
		struct dma_page_list *page_list = &local_list->page_list[i];
		int offset;

		len -= iov[i].iov_len;

		if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
			goto unpin;

		page_list->nr_pages = num_pages_spanned(&iov[i]);
		page_list->base_address = iov[i].iov_base;

		page_list->pages = pages;
		pages += page_list->nr_pages;

		for (offset=0, j=0; j < page_list->nr_pages; j++, offset+=PAGE_SIZE) {
			page_list->pages[j] = phys_to_page(__pa((unsigned int)page_list->base_address) + offset);
		}
		local_list->nr_iovecs = i + 1;
	}

	return local_list;

unpin:
	kfree(local_list);
out:
	return NULL;
}
Example 12
static void omap_tiler_free_dynamicpages(struct omap_tiler_info *info)
{
	int i;
	struct page *pg;

	for (i = 0; i < info->n_phys_pages; i++) {
		pg = phys_to_page(info->phys_addrs[i]);
		__free_page(pg);
	}
	return;
}
Example 13
/*
 * Due to conflicting restrictions on the placement of the framebuffer,
 * the bootloader is likely to leave the framebuffer pointed at a location
 * in memory that is outside the grhost aperture.  This function will move
 * the framebuffer contents from a physical address that is anywhere (lowmem,
 * highmem, or outside the memory map) to a physical address that is outside
 * the memory map.
 */
void tegra_move_framebuffer(unsigned long to, unsigned long from,
	unsigned long size)
{
	struct page *page;
	void __iomem *to_io;
	void *from_virt;
	unsigned long i;

	BUG_ON(PAGE_ALIGN((unsigned long)to) != (unsigned long)to);
	BUG_ON(PAGE_ALIGN(from) != from);
	BUG_ON(PAGE_ALIGN(size) != size);

	to_io = ioremap(to, size);
	if (!to_io) {
		pr_err("%s: Failed to map target framebuffer\n", __func__);
		return;
	}

	if (pfn_valid(page_to_pfn(phys_to_page(from)))) {
		for (i = 0 ; i < size; i += PAGE_SIZE) {
			page = phys_to_page(from + i);
			from_virt = kmap(page);
			memcpy(to_io + i, from_virt, PAGE_SIZE);
			kunmap(page);
		}
	} else {
		void __iomem *from_io = ioremap(from, size);
		if (!from_io) {
			pr_err("%s: Failed to map source framebuffer\n",
				__func__);
			goto out;
		}

		for (i = 0; i < size; i += 4)
			writel(readl(from_io + i), to_io + i);

		iounmap(from_io);
	}
out:
	iounmap(to_io);
}
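A hedged board-init sketch showing how the function above is typically invoked; the tegra_fb_start, tegra_bootloader_fb_start and related size variables are assumed to have been filled in by the board's memory-reservation code and are not defined in this listing.

static void __init board_copy_bootloader_fb(void)
{
	if (!tegra_bootloader_fb_size)
		return;

	/* Copy only as many bytes as both carveouts can hold. */
	tegra_move_framebuffer(tegra_fb_start, tegra_bootloader_fb_start,
			       min(tegra_fb_size, tegra_bootloader_fb_size));
}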
Example 14
/*
 * Create scatter-list for the already allocated DMA buffer.
 * This function could be replaced by dma_common_get_sgtable
 * as soon as it becomes available.
 */
int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size)
{
	struct page *page = phys_to_page((u32)handle);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}
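A hedged sketch of how a caller might use the helper above to hand a CMA allocation to a DMA master; the wrapper name is hypothetical and error handling is kept minimal.

static struct sg_table *map_cma_buffer_for_dma(struct device *dev,
					       void *cpu_addr,
					       dma_addr_t handle, size_t size)
{
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	/* The contiguous CMA chunk becomes a single-entry scatterlist. */
	if (ion_cma_get_sgtable(dev, sgt, cpu_addr, handle, size))
		goto err_free;

	/* Map it for the device; a 0 return means the mapping failed. */
	if (!dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL)) {
		sg_free_table(sgt);
		goto err_free;
	}

	return sgt;

err_free:
	kfree(sgt);
	return ERR_PTR(-ENOMEM);
}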
Example 15
void cma_unmap_kernel(u32 phys_addr, size_t size, void *cpu_addr)
{
	struct page *page = phys_to_page(phys_addr);

	BUG_ON(unlikely(!pfn_valid(__phys_to_pfn(phys_addr))));

	size = PAGE_ALIGN(size);

	if (PageHighMem(page))
		__dma_free_remap(cpu_addr, size);
	else
		__dma_remap(page, size, pgprot_kernel);
}
void __iomem *mem_vmap(phys_addr_t pa, size_t size, struct page *pages[])
{
	unsigned int num_pages = (size >> PAGE_SHIFT);
	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
	int i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = phys_to_page(pa);
		pa += PAGE_SIZE;
	}

	return vmap(pages, num_pages, VM_MAP, prot);
}
Example 17
/*
 * Create scatter-list for the already allocated DMA buffer.
 * This function could be replaced by dma_common_get_sgtable
 * as soon as it becomes available.
 */
static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t handle, size_t size)
{
	struct page *page = phys_to_page(dma_to_phys(dev, handle));
	struct scatterlist *sg;
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg = sgt->sgl;
	sg_set_page(sg, page, PAGE_ALIGN(size), 0);
	sg_dma_address(sg) = sg_phys(sg);

	return 0;
}
Example 18
static int do_op(unsigned long addr, int len, int is_write,
		 int (*op)(unsigned long addr, int len, void *arg), void *arg)
{
	struct page *page;
	int n;

	addr = maybe_map(addr, is_write);
	if (addr == -1UL)
		return -1;

	page = phys_to_page(addr);
	addr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	n = (*op)(addr, len, arg);
	kunmap(page);

	return n;
}
static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				struct dma_attrs *attrs)
{
	bool freed;
	phys_addr_t paddr = dma_to_phys(dev, dma_handle);

	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	freed = dma_release_from_contiguous(dev,
					phys_to_page(paddr),
					PAGE_ALIGN(size) >> PAGE_SHIFT);
	if (!freed)
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
struct scatterlist *ion_cp_heap_map_dma(struct ion_heap *heap,
					      struct ion_buffer *buffer)
{
	struct scatterlist *sglist;
	struct page *page = phys_to_page(buffer->priv_phys);

	if (page == NULL)
		return NULL;

	sglist = vmalloc(sizeof(*sglist));
	if (!sglist)
		return ERR_PTR(-ENOMEM);

	sg_init_table(sglist, 1);
	sg_set_page(sglist, page, buffer->size, 0);

	return sglist;
}
/**
 * Recover a given chunk to recover_chunk, which should already be cleared.
 *
 * @failed_chunk: Address of the start of the chunk that failed (we'll recover
 *	data from this chunk to recover_chunk).
 * @should_skip_fn: This will be called one page at a time.  If a page was never
 *	processed with calls to bitfix_process_page() then the should_skip_fn
 *	_must_ return true.  This means that the skip function must call the
 *	bitfix_does_overlap_reserved() function.
 */
static void _bitfix_recover_chunk(phys_addr_t failed_chunk,
				  bitfix_should_skip_fn_t should_skip_fn)
{
	const u32 failed_cu = bitfix_get_cu(failed_chunk);
	u32 cu;
	size_t offset;

	for (cu = 0; cu < CU_COUNT; cu++) {
		phys_addr_t this_chunk = (failed_chunk & ~CU_MASK) |
			(cu << CU_OFFSET);

		/* Don't include the failed corruption unit in our xor */
		if (cu == failed_cu)
			continue;

		for (offset = 0; offset < CHUNK_SIZE; offset += PAGE_SIZE) {
			phys_addr_t this_page = this_chunk + offset;
			u32 *virt_page;

			/*
			 * Don't include blocks that were skipped (never passed
			 * to bitfix_process_page()).  Except blocks in the xor
			 * corruption unit.
			 *
			 * should_skip_fn() will return true for the xor
			 * corruption unit but we do still need to process
			 * those pages now.
			 *
			 * should_skip_fn() will return true for them because it
			 * needs to incorporate bitfix_does_overlap_reserved(),
			 * and that will return true for the xor corruption
			 * unit.
			 */
			if ((cu != XOR_CU_NUM) && should_skip_fn(this_page))
				continue;

			virt_page = kmap_atomic(phys_to_page(this_page));
			bitfix_xor32(&recover_chunk[offset / sizeof(u32)],
				     virt_page, PAGE_SIZE);
			kunmap_atomic(virt_page);
		}
	}
}
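A hedged sketch of the skip callback the comment block above describes; the function and the pm_check_page_was_processed() helper are hypothetical, but the bitfix_does_overlap_reserved() requirement is taken straight from the comment.

static bool example_should_skip(phys_addr_t addr)
{
	/*
	 * Pages inside bitfix's reserved area (including the xor corruption
	 * unit) were never passed to bitfix_process_page(), so they must be
	 * reported as skipped.
	 */
	if (bitfix_does_overlap_reserved(addr))
		return true;

	/* Pages the checker never processed must be skipped as well. */
	return !pm_check_page_was_processed(addr);	/* hypothetical helper */
}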
int msm_iommu_map_extra(struct iommu_domain *domain,
				unsigned long start_iova,
				phys_addr_t phy_addr,
				unsigned long size,
				unsigned long page_size,
				int prot)
{
	int ret = 0;
	int i = 0;
	unsigned long temp_iova = start_iova;
	/* the extra "padding" should never be written to. map it
	 * read-only. */
	prot &= ~IOMMU_WRITE;

	if (msm_iommu_page_size_is_supported(page_size)) {
		struct scatterlist *sglist;
		unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
		struct page *dummy_page = phys_to_page(phy_addr);
		size_t map_ret;

		sglist = vmalloc(sizeof(*sglist) * nrpages);
		if (!sglist) {
			ret = -ENOMEM;
			goto out;
		}

		sg_init_table(sglist, nrpages);

		for (i = 0; i < nrpages; i++)
			sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);

		map_ret = iommu_map_sg(domain, temp_iova, sglist, nrpages,
					prot);
		if (map_ret != size) {
			pr_err("%s: could not map extra %lx in domain %p\n",
				__func__, start_iova, domain);
			ret = -EINVAL;
		} else {
			ret = 0;
		}

		vfree(sglist);
	} else {
Example 23
void cma_free_phys(u32 phys_addr, size_t size)
{
	struct page *pages;
	size_t count;

	if (unlikely(phys_addr < mem_start || phys_addr > mem_start + mem_size)) {
		pr_err("%s(%d) err: phys_addr 0x%x invalid!\n", __func__, __LINE__, phys_addr);
		return;
	}
//	BUG_ON(unlikely(!pfn_valid(__phys_to_pfn(phys_addr))));

	size = PAGE_ALIGN(size);
	count = size >> PAGE_SHIFT;

	pages = phys_to_page((u32)phys_addr);
	if (!dma_release_from_contiguous(NULL, pages, count)) {
		pr_err("%s(%d) err: dma_release_from_contiguous failed!\n", __func__, __LINE__);
		return;
	}
}
static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}
Example 25
void preload_balance_init(struct kbase_device *kbdev)
{
	int j = 0;

	struct page *p = phys_to_page(mali_balance.base);
	int npages = PAGE_ALIGN(mali_balance.size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	for (j = 0; j < npages; j++) {
		*(tmp++) = p++;
	}

	mali_balance.stream_reg = (unsigned char *)vmap(pages, npages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	__flush_dcache_area(mali_balance.stream_reg, streamout_size);

	memset(mali_balance.stream_reg, 0x0, mali_balance.size);
	memcpy(mali_balance.stream_reg, streamout, streamout_size);

	vfree(pages);
}
Example 26
void __iomem *mipi_lli_vmap(phys_addr_t phys_addr, phys_addr_t size)
{
	int i;
	struct page **pages;
	unsigned int num_pages = (unsigned int)(size >> PAGE_SHIFT);
	void *pv;

	pages = kmalloc(num_pages * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < num_pages; i++) {
		pages[i] = phys_to_page(phys_addr);
		phys_addr += PAGE_SIZE;
	}

	pv = vmap(pages, num_pages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	kfree(pages);

	return (void __iomem *)pv;
}
Example 27
/*
 * use sunxi_map_kernel to map phys addr to kernel space, instead of ioremap,
 * which cannot be used for mem_reserve areas.
 */
void *sunxi_map_kernel(unsigned int phys_addr, unsigned int size)
{
	int npages = PAGE_ALIGN(size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;
	struct page *cur_page = phys_to_page(phys_addr);
	pgprot_t pgprot;
	void *vaddr;
	int i;

	if (!pages)
		return NULL;

	for (i = 0; i < npages; i++)
		*(tmp++) = cur_page++;

	pgprot = pgprot_noncached(PAGE_KERNEL);
	vaddr = vmap(pages, npages, VM_MAP, pgprot);

	vfree(pages);
	return vaddr;
}
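A hedged usage sketch for the helper above: the reserved-area arguments are placeholders, and sunxi_unmap_kernel() is assumed to be the matching vunmap() wrapper provided alongside it.

static int clear_reserved_area(unsigned int area_phys, unsigned int area_size)
{
	void *va = sunxi_map_kernel(area_phys, area_size);

	if (!va)
		return -ENOMEM;

	/* The mem_reserve carveout is now CPU-addressable (non-cached). */
	memset(va, 0, area_size);

	sunxi_unmap_kernel(va);		/* assumed counterpart of the map call */
	return 0;
}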
Example 28
static void cache_maint_phys(phys_addr_t start, size_t length, enum cacheop op)
{
	size_t left = length;
	phys_addr_t begin = start;

	if (!soc_is_exynos5250() && !soc_is_exynos5210()) {
		if (length > (size_t) L1_FLUSH_ALL) {
			flush_cache_all();
			smp_call_function(
					(smp_call_func_t)__cpuc_flush_kern_all,
					NULL, 1);

			goto outer_cache_ops;
		}
	}

#ifdef CONFIG_HIGHMEM
	do {
		size_t len;
		struct page *page;
		void *vaddr;
		off_t offset;

		page = phys_to_page(start);
		offset = offset_in_page(start);
		len = PAGE_SIZE - offset;

		if (left < len)
			len = left;

		if (PageHighMem(page)) {
			vaddr = kmap(page);
			cache_maint_inner(vaddr + offset, len, op);
			kunmap(page);
		} else {
			vaddr = page_address(page) + offset;
			cache_maint_inner(vaddr, len, op);
		}
		left -= len;
		start += len;
	} while (left);
#else
	cache_maint_inner(phys_to_virt(begin), left, op);
#endif

outer_cache_ops:
	switch (op) {
	case EM_CLEAN:
		outer_clean_range(begin, begin + length);
		break;
	case EM_INV:
		if (length <= L2_FLUSH_ALL) {
			outer_inv_range(begin, begin + length);
			break;
		}
		/* else FALL THROUGH */
	case EM_FLUSH:
		outer_flush_range(begin, begin + length);
		break;
	}
}
Example 29
struct page* m4u_phys_to_page(unsigned int phys)
{
    return phys_to_page(phys);
}
Example 30
static int lowlevel_buffer_allocate(struct drm_device *dev,
		unsigned int flags, struct rockchip_drm_gem_buf *buf)
{
	int ret = 0;
	enum dma_attr attr;
	unsigned int nr_pages;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (buf->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	init_dma_attrs(&buf->dma_attrs);

	/*
	 * if ROCKCHIP_BO_CONTIG, a fully physically contiguous memory
	 * region will be allocated; otherwise the allocation is made as
	 * physically contiguous as possible.
	 */
	if (!(flags & ROCKCHIP_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);

	/*
	 * if ROCKCHIP_BO_WC or ROCKCHIP_BO_NONCACHABLE, use a write-combine
	 * mapping; otherwise use a cacheable mapping.
	 */
	if (flags & ROCKCHIP_BO_WC || !(flags & ROCKCHIP_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	dma_set_attr(attr, &buf->dma_attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);

	nr_pages = buf->size >> PAGE_SHIFT;

	if (!is_drm_iommu_supported(dev)) {
		dma_addr_t start_addr;
		unsigned int i = 0;

		buf->pages = kzalloc(sizeof(struct page *) * nr_pages,
					GFP_KERNEL);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate pages.\n");
			return -ENOMEM;
		}

		buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->kvaddr) {
			DRM_ERROR("failed to allocate buffer.\n");
			kfree(buf->pages);
			return -ENOMEM;
		}

		start_addr = buf->dma_addr;
		while (i < nr_pages) {
			buf->pages[i] = phys_to_page(start_addr);
			start_addr += PAGE_SIZE;
			i++;
		}
	} else {

		buf->pages = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate buffer.\n");
			return -ENOMEM;
		}
	}

	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
	if (!buf->sgt) {
		DRM_ERROR("failed to get sg table.\n");
		ret = -ENOMEM;
		goto err_free_attrs;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)buf->dma_addr,
			buf->size);

	return ret;

err_free_attrs:
	dma_free_attrs(dev->dev, buf->size, buf->pages,
			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
	buf->dma_addr = (dma_addr_t)NULL;

	if (!is_drm_iommu_supported(dev))
		kfree(buf->pages);

	return ret;
}
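A hedged caller sketch for the allocator above, showing how the flag bits discussed in its comments map onto DMA attributes; the wrapper name is hypothetical and buf->size is assumed to be set by the caller.

static int create_scanout_buffer(struct drm_device *dev,
				 struct rockchip_drm_gem_buf *buf)
{
	/*
	 * Scanout needs one physically contiguous, write-combined region:
	 * leaving ROCKCHIP_BO_NONCONTIG clear selects
	 * DMA_ATTR_FORCE_CONTIGUOUS, and ROCKCHIP_BO_WC selects
	 * DMA_ATTR_WRITE_COMBINE in lowlevel_buffer_allocate().
	 */
	unsigned int flags = ROCKCHIP_BO_WC;

	buf->size = PAGE_ALIGN(buf->size);
	return lowlevel_buffer_allocate(dev, flags, buf);
}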