int exynos_mem_mmap(struct file *filp, struct vm_area_struct *vma) { struct exynos_mem *mem = (struct exynos_mem *)filp->private_data; bool cacheable = mem->cacheable; dma_addr_t start = vma->vm_pgoff << PAGE_SHIFT; u32 pfn = vma->vm_pgoff; u32 size = vma->vm_end - vma->vm_start; if (!cma_is_registered_region(start, size)) { pr_err("[%s] handling non-cma region (%#x@%#x)is prohibited\n", __func__, size, start); return -EINVAL; } if (!cacheable) vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); vma->vm_flags |= VM_RESERVED; vma->vm_ops = &exynos_mem_ops; if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) { pr_err("writable mapping must be shared\n"); return -EINVAL; } if (remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot)) { pr_err("mmap fail\n"); return -EINVAL; } vma->vm_ops->open(vma); return 0; }
/*
 * jpeg_mmap() - mmap handler for the S5P JPEG codec device node.
 *
 * Maps the driver's reserved buffer (jpeg_ctrl->mem.base) into userspace
 * as non-cached, VM_IO memory.  Two build-time layouts exist:
 *
 *  - With CONFIG_S5P_SYSMMU_JPEG and without CONFIG_S5P_VMEM, mem.base is
 *    a vmalloc'ed kernel address: each page is translated individually
 *    with vmalloc_to_pfn() and remapped one PAGE_SIZE at a time (vmalloc
 *    memory is not physically contiguous).
 *  - Otherwise mem.base is a physical address inside a CMA reservation:
 *    it is validated with cma_is_registered_region() and remapped in a
 *    single remap_pfn_range() call.
 *
 * Returns 0 on success, -EINVAL for a non-CMA range, -ENOMEM if
 * remap_pfn_range() fails.
 */
int jpeg_mmap(struct file *filp, struct vm_area_struct *vma)
{
#if defined(CONFIG_S5P_SYSMMU_JPEG)
#if !defined(CONFIG_S5P_VMEM)
	unsigned long page_frame_no;
	unsigned long start;
	unsigned long size;
	char *ptr; /* vmalloc */

	size = vma->vm_end - vma->vm_start;
	ptr = (char *)jpeg_ctrl->mem.base;
	start = 0;

	vma->vm_flags |= VM_RESERVED | VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/*
	 * Remap page by page.  NOTE(review): the loop relies on `size`
	 * being a multiple of PAGE_SIZE; the mm core guarantees mmap
	 * lengths are page-aligned, otherwise the unsigned subtraction
	 * would wrap — confirm no other caller path exists.
	 */
	while (size > 0) {
		page_frame_no = vmalloc_to_pfn(ptr);
		if (remap_pfn_range(vma, vma->vm_start + start, page_frame_no,
			PAGE_SIZE, vma->vm_page_prot)) {
			jpeg_err("failed to remap jpeg pfn range.\n");
			return -ENOMEM;
		}

		start += PAGE_SIZE;
		ptr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
#endif /* CONFIG_S5P_VMEM */
#else
	unsigned long page_frame_no;
	unsigned long size;
	int ret;

	size = vma->vm_end - vma->vm_start;

	/* Refuse to expose anything outside the registered CMA region. */
	if (!cma_is_registered_region(jpeg_ctrl->mem.base, size)) {
		pr_err("[%s] handling non-cma region (%#x@%#x)is prohibited\n",
			__func__, (unsigned int)size, jpeg_ctrl->mem.base);
		return -EINVAL;
	}

	vma->vm_flags |= VM_RESERVED | VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Physically contiguous reservation: one remap covers it all. */
	page_frame_no = __phys_to_pfn(jpeg_ctrl->mem.base);
	ret = remap_pfn_range(vma, vma->vm_start, page_frame_no, size,
			vma->vm_page_prot);
	if (ret != 0) {
		jpeg_err("failed to remap jpeg pfn range.\n");
		return -ENOMEM;
	}
#endif /* SYSMMU_JPEG_ON */
	return 0;
}
/*
 * cache_maint_phys() - inner (CPU) + outer (L2) cache maintenance on a
 * physically contiguous range.
 *
 * @start:  physical base address of the range.
 * @length: length in bytes.
 * @op:     EM_CLEAN, EM_INV or EM_FLUSH.
 *
 * The range must lie inside a CMA-registered region; otherwise the call
 * is refused (logged, no-op).  On SoCs other than Exynos5250/5210, ranges
 * larger than L1_FLUSH_ALL skip the per-page walk and flush the whole L1
 * on every CPU instead.  Inner maintenance walks page by page under
 * CONFIG_HIGHMEM (kmap'ping highmem pages as needed); outer maintenance
 * always uses the original [begin, begin + length) range.
 *
 * Fix vs. original: the pr_err() passed a size_t and a phys_addr_t
 * directly to %#x conversions — a format/argument type mismatch.  The
 * arguments are now cast to unsigned int, matching the cast already used
 * for the identical message in jpeg_mmap().
 */
static void cache_maint_phys(phys_addr_t start, size_t length, enum cacheop op)
{
	size_t left = length;
	phys_addr_t begin = start;

	if (!cma_is_registered_region(start, length)) {
		pr_err("[%s] handling non-cma region (%#x@%#x)is prohibited\n",
				__func__, (unsigned int)length,
				(unsigned int)start);
		return;
	}

	if (!soc_is_exynos5250() && !soc_is_exynos5210()) {
		if (length > (size_t) L1_FLUSH_ALL) {
			/* Cheaper to nuke all of L1 on every CPU. */
			flush_cache_all();
			smp_call_function(
				(smp_call_func_t)__cpuc_flush_kern_all,
				NULL, 1);

			goto outer_cache_ops;
		}
	}

#ifdef CONFIG_HIGHMEM
	/* Page-by-page walk: highmem pages need a temporary kmap. */
	do {
		size_t len;
		struct page *page;
		void *vaddr;
		off_t offset;

		page = phys_to_page(start);
		offset = offset_in_page(start);
		len = PAGE_SIZE - offset;

		if (left < len)
			len = left;

		if (PageHighMem(page)) {
			vaddr = kmap(page);
			cache_maint_inner(vaddr + offset, len, op);
			kunmap(page);
		} else {
			vaddr = page_address(page) + offset;
			cache_maint_inner(vaddr, len, op);
		}

		left -= len;
		start += len;
	} while (left);
#else
	/* Lowmem only: the whole range is in the linear map. */
	cache_maint_inner(phys_to_virt(begin), left, op);
#endif

outer_cache_ops:
	switch (op) {
	case EM_CLEAN:
		outer_clean_range(begin, begin + length);
		break;
	case EM_INV:
		if (length <= L2_FLUSH_ALL) {
			outer_inv_range(begin, begin + length);
			break;
		}
		/* else FALL THROUGH: large invalidate degrades to flush */
	case EM_FLUSH:
		outer_flush_range(begin, begin + length);
		break;
	}
}
/*
 * g2d_do_blit() - validate a blit request and kick the FIMG2D engine.
 *
 * For G2D_MEMORY_KERNEL requests, src/dst addresses arrive as physical
 * addresses: they are checked against the registered CMA region (when
 * CONFIG_S5P_MEM_CMA) and then rewritten in place to kernel virtual
 * addresses via phys_to_virt(); translation uses init_mm's page table.
 * For user memory, the current process's page table is used and the
 * surfaces are validated against it before the engine is started.
 *
 * NOTE(review): the return convention is mixed — `false` (0) and `-1`
 * both signal failure while `true` (1) signals success.  A caller that
 * tests the result as a boolean will treat -1 as success; confirm how
 * callers check this before relying on it.
 */
int g2d_do_blit(struct g2d_global *g2d_dev, g2d_params *params)
{
	unsigned long pgd;
	int need_dst_clean = true;

	if ((params->src_rect.addr == NULL) || (params->dst_rect.addr == NULL)) {
		FIMG2D_ERROR("error : addr Null\n");
		return false;
	}

	if (params->flag.memory_type == G2D_MEMORY_KERNEL) {
#if defined(CONFIG_S5P_MEM_CMA)
		/* Physical addresses must fall inside the CMA reservation. */
		if (!cma_is_registered_region((unsigned int)params->src_rect.addr,
				GET_RECT_SIZE(params->src_rect))) {
			printk(KERN_ERR "[%s] SRC Surface is not included in CMA region\n",
				__func__);
			return -1;
		}
		if (!cma_is_registered_region((unsigned int)params->dst_rect.addr,
				GET_RECT_SIZE(params->dst_rect))) {
			printk(KERN_ERR "[%s] DST Surface is not included in CMA region\n",
				__func__);
			return -1;
		}
#endif
		/* Rewrite phys -> kernel virtual in place for the engine setup. */
		params->src_rect.addr =
			(unsigned char *)phys_to_virt((unsigned long)params->src_rect.addr);
		params->dst_rect.addr =
			(unsigned char *)phys_to_virt((unsigned long)params->dst_rect.addr);
		pgd = (unsigned long)init_mm.pgd;
	} else {
		pgd = (unsigned long)current->mm->pgd;
	}

	if (params->flag.memory_type == G2D_MEMORY_USER) {
		g2d_clip clip_src;
		g2d_clip_for_src(&params->src_rect, &params->dst_rect,
				&params->clip, &clip_src);

		/* User src/dst surfaces must not overlap. */
		if (g2d_check_overlap(params->src_rect, params->dst_rect,
				params->clip))
			return false;

		/*
		 * Both surfaces must be fully mapped in the user page table.
		 * NOTE(review): the src size carries a "+ 8" slack not present
		 * in the CMA checks above — presumably engine read-ahead;
		 * confirm against the hardware documentation.
		 */
		g2d_dev->src_attribute =
			g2d_check_pagetable((unsigned char *)GET_START_ADDR(params->src_rect),
				(unsigned int)GET_RECT_SIZE(params->src_rect) + 8,
				(u32)virt_to_phys((void *)pgd));
		if (g2d_dev->src_attribute == G2D_PT_NOTVALID) {
			FIMG2D_DEBUG("Src is not in valid pagetable\n");
			return false;
		}

		g2d_dev->dst_attribute =
			g2d_check_pagetable((unsigned char *)GET_START_ADDR_C(params->dst_rect, params->clip),
				(unsigned int)GET_RECT_SIZE_C(params->dst_rect, params->clip),
				(u32)virt_to_phys((void *)pgd));
		if (g2d_dev->dst_attribute == G2D_PT_NOTVALID) {
			FIMG2D_DEBUG("Dst is not in valid pagetable\n");
			return false;
		}

		/* Flush CPU-side page-table entries so the sysmmu sees them. */
		g2d_pagetable_clean((unsigned char *)GET_START_ADDR(params->src_rect),
			(u32)GET_RECT_SIZE(params->src_rect) + 8,
			(u32)virt_to_phys((void *)pgd));
		g2d_pagetable_clean((unsigned char *)GET_START_ADDR_C(params->dst_rect, params->clip),
			(u32)GET_RECT_SIZE_C(params->dst_rect, params->clip),
			(u32)virt_to_phys((void *)pgd));

		if (params->flag.render_mode & G2D_CACHE_OP) {
			/*g2d_mem_cache_oneshot((void *)GET_START_ADDR(params->src_rect),
				(void *)GET_START_ADDR(params->dst_rect),
				(unsigned int)GET_REAL_SIZE(params->src_rect),
				(unsigned int)GET_REAL_SIZE(params->dst_rect));*/
			// need_dst_clean = g2d_check_need_dst_cache_clean(params);
			g2d_mem_inner_cache(params);
			g2d_mem_outer_cache(g2d_dev, params, &need_dst_clean);
		}
	}

	/* Point the sysmmu at the page table chosen above. */
	s5p_sysmmu_set_tablebase_pgd(g2d_dev->dev,
		(u32)virt_to_phys((void *)pgd));

	if (g2d_init_regs(g2d_dev, params) < 0) {
		return false;
	}

	/* Do bitblit */
	g2d_start_bitblt(g2d_dev, params);

	/* Destination was not cleaned pre-blit: invalidate it afterwards. */
	if (!need_dst_clean)
		g2d_mem_outer_cache_inv(params);

	return true;
}