int s3cfb_unmap_video_memory(struct s3cfb_global *fbdev, struct fb_info *fb) { struct fb_fix_screeninfo *fix = &fb->fix; struct s3cfb_window *win = fb->par; #ifdef CONFIG_CMA struct cma_info mem_info; int err; #endif #if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412) return 0; #endif if (fix->smem_start) { #ifdef CONFIG_CMA err = cma_info(&mem_info, fbdev->dev, 0); if (ERR_PTR(err)) return -ENOMEM; if (fix->smem_start >= mem_info.lower_bound && fix->smem_start <= mem_info.upper_bound) cma_free(fix->smem_start); #else dma_free_coherent(fbdev->dev, fix->smem_len, fb->screen_base, fix->smem_start); #endif fix->smem_start = 0; fix->smem_len = 0; dev_info(fbdev->dev, "[fb%d] video memory released\n", win->id); } return 0; }
int g2d_init_mem(struct device *dev, unsigned int *base, unsigned int *size) { #ifdef CONFIG_S5P_MEM_CMA struct cma_info mem_info; int err; char cma_name[8]; #endif #ifdef CONFIG_S5P_MEM_CMA /* CMA */ sprintf(cma_name, "fimg2d"); err = cma_info(&mem_info, dev, 0); FIMG2D_DEBUG("[cma_info] start_addr : 0x%x, end_addr : 0x%x, " "total_size : 0x%x, free_size : 0x%x\n", mem_info.lower_bound, mem_info.upper_bound, mem_info.total_size, mem_info.free_size); if (err) { FIMG2D_ERROR("%s: get cma info failed\n", __func__); return -1; } *size = mem_info.total_size; *base = (dma_addr_t)cma_alloc (dev, cma_name, (size_t)(*size), 0); FIMG2D_DEBUG("size = 0x%x\n", *size); FIMG2D_DEBUG("*base phys= 0x%x\n", *base); FIMG2D_DEBUG("*base virt = 0x%x\n", (u32)phys_to_virt(*base)); #else *base = s5p_get_media_memory_bank(S5P_MDEV_FIMG2D, 0); #endif return 0; }
/*
 * s3cfb_map_video_memory - allocate per-window framebuffer memory.
 *
 * CMA build: allocates from the "fimd_video"/"fimd" CMA region (name depends
 * on CONFIG_EXYNOS_CONTENT_PATH_PROTECTION) and leaves screen_base NULL —
 * no CPU mapping is created here.  Non-CMA build: dma_alloc_writecombine()
 * provides both the DMA address and the CPU mapping.
 *
 * Returns 0 on success, a negative errno from cma_info(), or -1 if the CMA
 * allocation itself fails.
 */
int s3cfb_map_video_memory(struct s3cfb_global *fbdev, struct fb_info *fb)
{
	struct fb_fix_screeninfo *fix = &fb->fix;
	struct s3cfb_window *win = fb->par;
#ifdef CONFIG_CMA
	struct cma_info mem_info;
	int err;
#endif

	/* On EXYNOS4212/4412 the memory is set up elsewhere; skip. */
#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
	return 0;
#endif
	/* Window backed by someone else's buffer: nothing to allocate. */
	if (win->owner == DMA_MEM_OTHER)
		return 0;

#ifdef CONFIG_CMA
	err = cma_info(&mem_info, fbdev->dev, CMA_REGION_VIDEO);
	if (err)
		return err;
	/*
	 * The argument list of this single cma_alloc() call is selected by
	 * the preprocessor: protected-content builds use the "fimd_video"
	 * region, others use "fimd".
	 */
	fix->smem_start = (dma_addr_t)cma_alloc
#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
		(fbdev->dev, "fimd_video", (size_t)PAGE_ALIGN(fix->smem_len), 0);
#else
		(fbdev->dev, "fimd", (size_t)PAGE_ALIGN(fix->smem_len), 0);
#endif
	if (IS_ERR_OR_NULL((char *)fix->smem_start)) {
		printk(KERN_ERR "fix->smem_start allocation fail (%x)\n",
			(int)fix->smem_start);
		/* NOTE(review): returns -1 rather than a -E* code — callers
		 * appear to only test for nonzero; confirm before changing. */
		return -1;
	}
	/* Deliberately no CPU mapping in the CMA path. */
	fb->screen_base = NULL;
#else
	fb->screen_base = dma_alloc_writecombine(fbdev->dev,
					PAGE_ALIGN(fix->smem_len),
					(unsigned int *)&fix->smem_start,
					GFP_KERNEL);
#endif
	dev_info(fbdev->dev, "[fb%d] Alloc dma: 0x%08x, "
		"size: 0x%08x\n", win->id,
		(unsigned int)fix->smem_start, fix->smem_len);

	win->owner = DMA_MEM_FIMD;
	return 0;
}
/*
 * s3cfb_map_default_video_memory - allocate the default (boot-time)
 * framebuffer for a window.
 *
 * Three backends, chosen at build time:
 *  - CONFIG_CMA:            allocate from the CMA_REGION_FIMD region and map
 *                           it with cma_get_virt() (cached mapping, arg 1).
 *  - CONFIG_S5P_MEM_BOOTMEM: use the statically reserved media bank and
 *                           ioremap_wc() it; smem_len comes from the bank.
 *  - otherwise:             dma_alloc_writecombine().
 *
 * Returns 0 on success, -EBUSY if CMA allocation fails, -ENOMEM if no CPU
 * mapping was obtained, or the error from cma_info().
 */
int s3cfb_map_default_video_memory(struct s3cfb_global *fbdev,
					struct fb_info *fb, int fimd_id)
{
	struct fb_fix_screeninfo *fix = &fb->fix;
	struct s3cfb_window *win = fb->par;
#ifdef CONFIG_CMA
	struct cma_info mem_info;
	int err;
#endif

	/* Buffer owned by another device: nothing to do. */
	if (win->owner == DMA_MEM_OTHER)
		return 0;

#ifdef CONFIG_CMA
	err = cma_info(&mem_info, fbdev->dev, CMA_REGION_FIMD);
	if (err)
		return err;
	/* NOTE(review): CMA_REGION_FIMD doubles as the region-name argument
	 * here — presumably a string macro in this tree; confirm. */
	fix->smem_start = (dma_addr_t)cma_alloc
		(fbdev->dev, CMA_REGION_FIMD, (size_t)fix->smem_len, 0);
	if (IS_ERR_VALUE(fix->smem_start)) {
		return -EBUSY;
	}
	fb->screen_base = cma_get_virt(fix->smem_start, fix->smem_len, 1);
#elif defined(CONFIG_S5P_MEM_BOOTMEM)
	fix->smem_start = s5p_get_media_memory_bank(S5P_MDEV_FIMD, 1);
	fix->smem_len = s5p_get_media_memsize_bank(S5P_MDEV_FIMD, 1);
	fb->screen_base = ioremap_wc(fix->smem_start, fix->smem_len);
#else
	fb->screen_base = dma_alloc_writecombine(fbdev->dev,
					PAGE_ALIGN(fix->smem_len),
					(unsigned int *)&fix->smem_start,
					GFP_KERNEL);
#endif

	if (!fb->screen_base)
		return -ENOMEM;
	else
		dev_info(fbdev->dev, "[fb%d] dma: 0x%08x, cpu: 0x%08x, "
			"size: 0x%08x\n", win->id,
			(unsigned int)fix->smem_start,
			(unsigned int)fb->screen_base, fix->smem_len);

	/* Clear the freshly mapped framebuffer so stale data never shows. */
	memset(fb->screen_base, 0, fix->smem_len);
	win->owner = DMA_MEM_FIMD;

	return 0;
}
int s3cfb_map_default_video_memory(struct s3cfb_global *fbdev, struct fb_info *fb, int fimd_id) { struct fb_fix_screeninfo *fix = &fb->fix; struct s3cfb_window *win = fb->par; #ifdef CONFIG_S5P_MEM_CMA struct cma_info mem_info; unsigned int reserved_size; int err; #endif #if MALI_USE_UNIFIED_MEMORY_PROVIDER unsigned int arg = 0; #endif if (win->owner == DMA_MEM_OTHER) return 0; #ifdef CONFIG_S5P_MEM_CMA err = cma_info(&mem_info, fbdev->dev, 0); if (ERR_PTR(err)) return -ENOMEM; reserved_size = mem_info.total_size; fix->smem_start = (dma_addr_t)cma_alloc (fbdev->dev, "fimd", (size_t)reserved_size, 0); fb->screen_base = cma_get_virt(fix->smem_start, reserved_size, 1); #elif defined(CONFIG_S5P_MEM_BOOTMEM) fix->smem_start = s5p_get_media_memory_bank(S5P_MDEV_FIMD, 0); fix->smem_len = s5p_get_media_memsize_bank(S5P_MDEV_FIMD, 0); fb->screen_base = phys_to_virt(fix->smem_start); #endif memset(fb->screen_base, 0, fix->smem_len); win->owner = DMA_MEM_FIMD; #if MALI_USE_UNIFIED_MEMORY_PROVIDER if (s3cfb_ump_wrapper(win, fix, arg)) { dev_info(fbdev->dev, "[fb%d] : Wrapped UMP memory : %x\n" , win->id, (unsigned int)ump_wrapped_buffer); s3cfb_unmap_video_memory(fbdev, fb); return -ENOMEM; } #endif return 0; }
/*
 * mali_mem_validation_add_range - register the physical memory range that
 * Mali is allowed to map (the framebuffer region).
 * @start: physical base of the range (page aligned)
 * @size:  length of the range in bytes (page aligned)
 *
 * Only one range may ever be registered.  With MALI_SEC_MEM_VALIDATION the
 * caller-supplied @start/@size are overridden by the "fimd" CMA region:
 * base = lower_bound, size = the currently *used* part (total - free).
 *
 * Returns _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT otherwise.
 */
_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size)
{
	/* MALI_SEC */
#if defined(MALI_SEC_MEM_VALIDATION)
	struct cma_info mem_info;
#endif

	/* Check that no other MEM_VALIDATION resources exist */
	if (MALI_INVALID_MEM_ADDR != mali_mem_validator.phys_base) {
		MALI_PRINT_ERROR(("Failed to add frame buffer memory; another range is already specified\n"));
		return _MALI_OSK_ERR_FAULT;
	}

	/* MALI_SEC */
#if defined(MALI_SEC_MEM_VALIDATION)
	/* NOTE(review): this tree's cma_info() takes the region name as the
	 * third argument; other trees pass an integer — confirm signature. */
	if (cma_info(&mem_info, &exynos4_device_pd[PD_G3D].dev, "fimd")) {
		MALI_PRINT_ERROR(("Failed to get framebuffer information from CMA\n"));
		return _MALI_OSK_ERR_FAULT;
	} else {
		start = mem_info.lower_bound;
		/* used portion of the region, not its full extent */
		size = mem_info.total_size - mem_info.free_size;
	}
#endif

	/* Check restrictions on page alignment */
	if ((0 != (start & (~_MALI_OSK_CPU_PAGE_MASK))) ||
	    (0 != (size & (~_MALI_OSK_CPU_PAGE_MASK)))) {
		MALI_PRINT_ERROR(("Failed to add frame buffer memory; incorrect alignment\n"));
		return _MALI_OSK_ERR_FAULT;
	}

	mali_mem_validator.phys_base = start;
	mali_mem_validator.size = size;
	MALI_DEBUG_PRINT(2, ("Memory Validator installed for Mali physical address base=0x%08X, size=0x%08X\n",
		mali_mem_validator.phys_base, mali_mem_validator.size));

	return _MALI_OSK_ERR_OK;
}
int s3cfb_extdsp_map_default_video_memory(struct s3cfb_extdsp_global *fbdev, struct fb_info *fb, int extdsp_id) { struct fb_fix_screeninfo *fix = &fb->fix; #ifdef CONFIG_S5P_MEM_CMA struct cma_info mem_info; int err; #endif #ifdef CONFIG_S5P_MEM_CMA err = cma_info(&mem_info, fbdev->dev, 0); if (ERR_PTR(err)) return -ENOMEM; fix->smem_start = (dma_addr_t)cma_alloc (fbdev->dev, "extdsp", (size_t)PAGE_ALIGN(fix->smem_len), 0); fb->screen_base = cma_get_virt(fix->smem_start, PAGE_ALIGN(fix->smem_len), 1); #else fb->screen_base = dma_alloc_writecombine(fbdev->dev, PAGE_ALIGN(fix->smem_len), (unsigned int *) &fix->smem_start, GFP_KERNEL); #endif if (!fb->screen_base) return -ENOMEM; else dev_info(fbdev->dev, "[fb%d] dma: 0x%08x, cpu: 0x%08x, " "size: 0x%08x\n", 0, (unsigned int)fix->smem_start, (unsigned int)fb->screen_base, fix->smem_len); memset(fb->screen_base, 0, fix->smem_len); return 0; }
/*
 * s5p_mfc_alloc_firmware - allocate the MFC bitprocessor (firmware) buffer
 * and record the two memory-port base addresses on @dev.
 *
 * Side effects: writes the file-scope globals s5p_mfc_bitproc_buf /
 * s5p_mfc_bitproc_phys / s5p_mfc_bitproc_virt and dev->port_a / dev->port_b.
 *
 * Returns 0 on success; -EINVAL on bad device/CMA layout; -ENOMEM if the
 * buffer cannot be allocated (or, on non-ION builds, if it already exists);
 * -EIO if the buffer is misaligned or cannot be remapped.
 */
/* Allocate firmware */
int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev)
{
#if defined(CONFIG_VIDEOBUF2_CMA_PHYS)
	int err;
	struct cma_info mem_info_f, mem_info_a, mem_info_b;
#endif
	unsigned int base_align;
	unsigned int firmware_size;
	void *alloc_ctx;

	mfc_debug_enter();

	if (!dev) {
		mfc_err("no mfc device to run\n");
		return -EINVAL;
	}
	base_align = dev->variant->buf_align->mfc_base_align;
	firmware_size = dev->variant->buf_size->firmware_code;
	alloc_ctx = dev->alloc_ctx[MFC_CMA_FW_ALLOC_CTX];

	/* Double allocation: error on CMA-phys builds, benign no-op on ION. */
#if !defined(CONFIG_VIDEOBUF2_ION)
	if (s5p_mfc_bitproc_buf) {
		mfc_err("Attempting to allocate firmware when it seems that it is already loaded.\n");
		return -ENOMEM;
	}
#else
	if (s5p_mfc_bitproc_buf)
		return 0;
#endif

	/* Get memory region information and check if it is correct */
#if defined(CONFIG_VIDEOBUF2_CMA_PHYS)
	/* NOTE(review): each mfc_debug below runs before the err check, so it
	 * may print an uninitialized cma_info on failure — harmless at debug
	 * level but worth tidying. */
	err = cma_info(&mem_info_f, dev->v4l2_dev.dev, MFC_CMA_FW);
	mfc_debug(3, "Area \"%s\" is from %08x to %08x and has size %08x",
			"f", mem_info_f.lower_bound, mem_info_f.upper_bound,
			mem_info_f.total_size);
	if (err) {
		mfc_err("Couldn't get memory information from CMA.\n");
		return -EINVAL;
	}
	err = cma_info(&mem_info_a, dev->v4l2_dev.dev, MFC_CMA_BANK1);
	mfc_debug(3, "Area \"%s\" is from %08x to %08x and has size %08x",
			"a", mem_info_a.lower_bound, mem_info_a.upper_bound,
			mem_info_a.total_size);
	if (err) {
		mfc_err("Couldn't get memory information from CMA.\n");
		return -EINVAL;
	}

	/* Hardware requires the firmware below the bank-A buffer region. */
	if (mem_info_f.upper_bound > mem_info_a.lower_bound) {
		mfc_err("Firmware has to be "
			"allocated before memory for buffers (bank A).\n");
		return -EINVAL;
	}
#endif
	mfc_debug(2, "Allocating memory for firmware.\n");

	/* DRM (protected) sessions use the dedicated secure allocator ctx. */
#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
	if (dev->num_drm_inst)
		alloc_ctx = dev->alloc_ctx_fw;
#endif

	s5p_mfc_bitproc_buf = s5p_mfc_mem_alloc_priv(alloc_ctx, firmware_size);
	if (IS_ERR(s5p_mfc_bitproc_buf)) {
		s5p_mfc_bitproc_buf = 0;
		printk(KERN_ERR "Allocating bitprocessor buffer failed\n");
		return -ENOMEM;
	}

	s5p_mfc_bitproc_phys = s5p_mfc_mem_daddr_priv(s5p_mfc_bitproc_buf);
	/* NOTE(review): alignment test treats base_align as a shift count
	 * (1 << base_align), matching the error message's "%dBytes". */
	if (s5p_mfc_bitproc_phys & ((1 << base_align) - 1)) {
		mfc_err("The base memory is not aligned to %dBytes.\n",
			(1 << base_align));
		s5p_mfc_mem_free_priv(s5p_mfc_bitproc_buf);
		s5p_mfc_bitproc_phys = 0;
		s5p_mfc_bitproc_buf = 0;
		return -EIO;
	}

	/* Non-DRM sessions need a kernel virtual mapping of the firmware. */
	if (!dev->num_drm_inst) {
		s5p_mfc_bitproc_virt =
			s5p_mfc_mem_vaddr_priv(s5p_mfc_bitproc_buf);
		mfc_debug(2, "Virtual address for FW: %08lx\n",
			(long unsigned int)s5p_mfc_bitproc_virt);
		if (!s5p_mfc_bitproc_virt) {
			mfc_err("Bitprocessor memory remap failed\n");
			s5p_mfc_mem_free_priv(s5p_mfc_bitproc_buf);
			s5p_mfc_bitproc_phys = 0;
			s5p_mfc_bitproc_buf = 0;
			return -EIO;
		}
	}

	dev->port_a = s5p_mfc_bitproc_phys;

#if defined(CONFIG_VIDEOBUF2_CMA_PHYS)
	if (IS_TWOPORT(dev)) {
		err = cma_info(&mem_info_b, dev->v4l2_dev.dev, MFC_CMA_BANK2);
		mfc_debug(3, "Area \"%s\" is from %08x to %08x and has size %08x",
				"b", mem_info_b.lower_bound,
				mem_info_b.upper_bound,
				mem_info_b.total_size);
		if (err) {
			mfc_err("Couldn't get memory information from CMA.\n");
			return -EINVAL;
		}
		dev->port_b = mem_info_b.lower_bound;
		mfc_debug(2, "Port A: %08x Port B: %08x (FW: %08x size: %08x)\n",
				dev->port_a, dev->port_b,
				s5p_mfc_bitproc_phys, firmware_size);
	} else {
		mfc_debug(2, "Port : %08x (FW: %08x size: %08x)\n",
				dev->port_a, s5p_mfc_bitproc_phys,
				firmware_size);
	}
#elif defined(CONFIG_VIDEOBUF2_ION) || defined(CONFIG_VIDEOBUF2_DMA_CMA)
	/* Single-allocator builds: both ports alias the firmware base. */
	dev->port_b = s5p_mfc_bitproc_phys;
	mfc_debug(2, "Port A: %08x Port B: %08x (FW: %08x size: %08x)\n",
			dev->port_a, dev->port_b,
			s5p_mfc_bitproc_phys, firmware_size);
#endif
	mfc_debug_leave();

	return 0;
}
int s3cfb_map_default_video_memory(struct s3cfb_global *fbdev, struct fb_info *fb, int fimd_id) { struct fb_fix_screeninfo *fix = &fb->fix; struct s3cfb_window *win = fb->par; #ifdef CONFIG_VCM struct cma_info mem_info; unsigned int reserved_size; int err; struct vcm_phys *phys = NULL; ump_dd_physical_block ump_memory_description; unsigned int device_virt_start = 0; int frame_size = fix->smem_len / CONFIG_FB_S3C_NR_BUFFERS; struct vcm_res *fb_dev_vcm_res[CONFIG_FB_S3C_NR_BUFFERS]; enum vcm_dev_id id; #else #ifdef CONFIG_S5P_MEM_CMA struct cma_info mem_info; unsigned int reserved_size; int err; #endif #endif #ifdef MALI_USE_UNIFIED_MEMORY_PROVIDER #ifdef CONFIG_VCM int i; unsigned int arg = 0; #ifdef CONFIG_UMP_VCM_ALLOC struct ump_vcm ump_vcm; #endif unsigned int arg = 0; #endif #endif if (win->owner == DMA_MEM_OTHER) return 0; #ifdef CONFIG_VCM phys = kmalloc(sizeof(*phys) + sizeof(*phys->parts), GFP_KERNEL); memset(phys, 0, sizeof(*phys) + sizeof(*phys->parts)); if (fimd_id == 0) id = VCM_DEV_FIMD0; else id = VCM_DEV_FIMD1; err = cma_info(&mem_info, fbdev->dev, 0); if (ERR_PTR(err)) return -ENOMEM; reserved_size = fix->smem_len; fix->smem_start = (dma_addr_t)cma_alloc (fbdev->dev, "fimd", (size_t)reserved_size, 0); fb->screen_base = cma_get_virt(fix->smem_start, reserved_size, 1); fbdev->s5p_vcm = vcm_create_unified((SZ_64M), id, &s3cfb_vcm_driver); if (IS_ERR(fbdev->s5p_vcm)) return PTR_ERR(fbdev->s5p_vcm); if (vcm_activate(fbdev->s5p_vcm)) dev_info(fbdev->dev, "[fb%d] : VCM activated", win->id); phys->count = 1; phys->size = fix->smem_len; phys->free = NULL; phys->parts[0].size = fix->smem_len; phys->parts[0].start = fix->smem_start; win->s5p_vcm_res = vcm_map(fbdev->s5p_vcm, phys, 0); device_virt_start = win->s5p_vcm_res->start; for (i = 0; i < CONFIG_FB_S3C_NR_BUFFERS; i++) { fb_dev_vcm_res[i] = kzalloc(sizeof(struct vcm_res), GFP_KERNEL); win->s3cfb_vcm[i].dev_vcm_res = fb_dev_vcm_res[i]; win->s3cfb_vcm[i].dev_vcm_res->start = device_virt_start + frame_size 
* i; win->s3cfb_vcm[i].dev_vcm_res->bound_size = frame_size; win->s3cfb_vcm[i].dev_vcm_res->res_size = frame_size; win->s3cfb_vcm[i].dev_vcm = fbdev->s5p_vcm; win->s3cfb_vcm[i].dev_vcm_res->vcm = fbdev->s5p_vcm; if (IS_ERR(win->s3cfb_vcm[i].dev_vcm_res)) return -ENOMEM; } #else #ifdef CONFIG_S5P_MEM_CMA err = cma_info(&mem_info, fbdev->dev, 0); if (ERR_PTR(err)) return -ENOMEM; reserved_size = mem_info.total_size; fix->smem_start = (dma_addr_t)cma_alloc (fbdev->dev, "fimd", (size_t)reserved_size, 0); fb->screen_base = cma_get_virt(fix->smem_start, reserved_size, 1); #elif defined(CONFIG_S5P_MEM_BOOTMEM) fix->smem_start = s5p_get_media_memory_bank(S5P_MDEV_FIMD, 1); fix->smem_len = s5p_get_media_memsize_bank(S5P_MDEV_FIMD, 1); fb->screen_base = ioremap_wc(fix->smem_start, fix->smem_len); #endif #endif memset(fb->screen_base, 0, fix->smem_len); win->owner = DMA_MEM_FIMD; #if MALI_USE_UNIFIED_MEMORY_PROVIDER #ifdef CONFIG_VCM #ifdef CONFIG_UMP_VCM_ALLOC for (i = 0; i < CONFIG_FB_S3C_NR_BUFFERS; i++) { ump_vcm.vcm = win->s3cfb_vcm[i].dev_vcm; ump_vcm.vcm_res = win->s3cfb_vcm[i].dev_vcm_res; ump_vcm.dev_id = id; arg = (unsigned int)&ump_vcm; ump_memory_description.addr = fix->smem_start + ((fix->smem_len / CONFIG_FB_S3C_NR_BUFFERS) * i); ump_memory_description.size = fix->smem_len / CONFIG_FB_S3C_NR_BUFFERS; win->ump_wrapped_buffer[i] = ump_dd_handle_create_from_phys_blocks (&ump_memory_description, 1); if (ump_dd_vcm_attribute_set(win->ump_wrapped_buffer[i], arg)) return -ENOMEM; } #else if (s3cfb_ump_wrapper(fix, arg, 0, win)) { dev_info(fbdev->dev, "[fb%d] : Wrapped UMP memory : %x\n" , win->id, (unsigned int)ump_wrapped_buffer); s3cfb_unmap_video_memory(fbdev, fb); return -ENOMEM; } #endif #endif #endif return 0; }
/*
 * s3cfb_map_video_memory - allocate per-window framebuffer memory (VCM
 * variant).  With CONFIG_VCM the buffer is carved from the "fimd" CMA
 * region, mapped through the window's VCM, and split into frame_num
 * per-frame vcm_res descriptors; with MALI UMP each frame may additionally
 * be wrapped into a UMP handle.  Without CONFIG_VCM it is a plain
 * dma_alloc_writecombine().
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int s3cfb_map_video_memory(struct s3cfb_global *fbdev, struct fb_info *fb)
{
	struct fb_fix_screeninfo *fix = &fb->fix;
	struct s3cfb_window *win = fb->par;
#ifdef CONFIG_VCM
	struct fb_var_screeninfo *var = &fb->var;
	struct cma_info mem_info;
	unsigned int reserved_size;
	int err;
	struct vcm_phys *phys = NULL;
	unsigned int device_virt_start = 0;
	/* number of flip buffers implied by the virtual y-resolution */
	int frame_num = var->yres_virtual / var->yres;
	int frame_size = fix->smem_len / frame_num;
	/* VLA sized by frame_num — assumed small (flip-buffer count) */
	struct vcm_res *fb_dev_vcm_res[frame_num];
	enum vcm_dev_id id;
	struct ump_vcm ump_vcm;
	unsigned int arg = 0;
	int i;
	ump_dd_physical_block ump_memory_description;

	if (win->owner == DMA_MEM_OTHER)
		return 0;

	/* NOTE(review): kmalloc result is used unchecked — NULL deref risk
	 * under memory pressure; `phys` is also never freed on the error
	 * paths below. */
	phys = kmalloc(sizeof(*phys) + sizeof(*phys->parts), GFP_KERNEL);
	memset(phys, 0, sizeof(*phys) + sizeof(*phys->parts));

	/* Windows 0-4 belong to FIMD0, the rest to FIMD1. */
	if (win->id < 5)
		id = VCM_DEV_FIMD0;
	else
		id = VCM_DEV_FIMD1;

	err = cma_info(&mem_info, fbdev->dev, 0);
	/* NOTE(review): ERR_PTR() misused on an integer error code; behaves
	 * like `if (err)` only by accident. */
	if (ERR_PTR(err))
		return -ENOMEM;
	reserved_size = fix->smem_len;
	fix->smem_start = (dma_addr_t)cma_alloc
		(fbdev->dev, "fimd", (size_t)reserved_size, 0);
	fb->screen_base = cma_get_virt(fix->smem_start, reserved_size, 1);

	/* One contiguous chunk backs all frames of this window. */
	phys->count = 1;
	phys->size = fix->smem_len;
	phys->free = NULL;
	phys->parts[0].size = fix->smem_len;
	phys->parts[0].start = fix->smem_start;

	win->s5p_vcm_res = vcm_map(fbdev->s5p_vcm, phys, 0);
	device_virt_start = win->s5p_vcm_res->start;

	for (i = 0; i < frame_num; i++) {
		/* NOTE(review): kzalloc returns NULL (not ERR_PTR) on
		 * failure, and the IS_ERR check below runs only after the
		 * pointer has already been dereferenced. */
		fb_dev_vcm_res[i] = kzalloc(sizeof(struct vcm_res),
					    GFP_KERNEL);
		win->s3cfb_vcm[i].dev_vcm_res = fb_dev_vcm_res[i];
		win->s3cfb_vcm[i].dev_vcm_res->start = device_virt_start +
			frame_size * i;
		win->s3cfb_vcm[i].dev_vcm_res->bound_size = frame_size;
		win->s3cfb_vcm[i].dev_vcm_res->res_size = frame_size;
		win->s3cfb_vcm[i].dev_vcm = fbdev->s5p_vcm;
		win->s3cfb_vcm[i].dev_vcm_res->vcm = fbdev->s5p_vcm;
		if (IS_ERR(win->s3cfb_vcm[i].dev_vcm_res))
			return -ENOMEM;
	}
	/* Only the first frame is cleared here. */
	memset(fb->screen_base, 0, (fix->smem_len / frame_num));
	win->owner = DMA_MEM_FIMD;
#else
	if (win->owner == DMA_MEM_OTHER)
		return 0;

	fb->screen_base = dma_alloc_writecombine(fbdev->dev,
					PAGE_ALIGN(fix->smem_len),
					(unsigned int *)&fix->smem_start,
					GFP_KERNEL);
	if (!fb->screen_base)
		return -ENOMEM;
	else
		dev_info(fbdev->dev, "[fb%d] dma: 0x%08x, cpu: 0x%08x, "
			"size: 0x%08x\n", win->id,
			(unsigned int)fix->smem_start,
			(unsigned int)fb->screen_base, fix->smem_len);

	memset(fb->screen_base, 0, fix->smem_len);
	win->owner = DMA_MEM_FIMD;
#endif

#if MALI_USE_UNIFIED_MEMORY_PROVIDER
#ifdef CONFIG_VCM
#ifdef CONFIG_UMP_VCM_ALLOC
	/* Wrap each frame into a UMP handle bound to its VCM mapping. */
	for (i = 0; i < frame_num; i++) {
		ump_vcm.vcm = win->s3cfb_vcm[i].dev_vcm;
		ump_vcm.vcm_res = win->s3cfb_vcm[i].dev_vcm_res;
		ump_vcm.dev_id = id;
		arg = (unsigned int)&ump_vcm;

		ump_memory_description.addr = fix->smem_start +
			((fix->smem_len / frame_num) * i);
		ump_memory_description.size = fix->smem_len / frame_num;
		win->ump_wrapped_buffer[i] =
			ump_dd_handle_create_from_phys_blocks
			(&ump_memory_description, 1);
		if (ump_dd_vcm_attribute_set(win->ump_wrapped_buffer[i], arg))
			return -ENOMEM;
	}
#else
	if (s3cfb_ump_wrapper(fix, arg, 0, win)) {
		dev_info(fbdev->dev, "[fb%d] : Wrapped UMP memory : %x\n"
			, win->id, (unsigned int)ump_wrapped_buffer);
		s3cfb_unmap_video_memory(fbdev, fb);
		return -ENOMEM;
	}
#endif
#endif
#endif
	return 0;
}
/*
 * mfc_init_mem_mgr - set up the MFC memory ports.
 *
 * Three mutually exclusive strategies selected at build time:
 *  - SYSMMU_MFC_ON + VCM/UMP: reserve virtual ranges from a unified VCM;
 *  - SYSMMU_MFC_ON + S5P_VMEM or plain vmalloc: carve aligned port windows
 *    out of kernel virtual memory;
 *  - no SYSMMU: early allocator (CMA or bootmem regions).
 *
 * NOTE(review): this definition appears truncated in this chunk — the
 * CMA/content-path-protection branch does not close before the next
 * function begins.  Documented as-is; do not rely on the tail.
 */
int mfc_init_mem_mgr(struct mfc_dev *dev)
{
	int i;
#if !defined(CONFIG_VIDEO_MFC_VCM_UMP)
	dma_addr_t base[MAX_ALLOCATION];
#else
	/* FIXME: for support user-side allocation. it's temporary solution */
	struct vcm_res *hole;
#endif
#ifndef SYSMMU_MFC_ON
	size_t size;
#endif
#ifdef CONFIG_S5P_MEM_CMA
	struct cma_info cma_infos[2];
#ifdef CONFIG_EXYNOS4_CONTENT_PATH_PROTECTION
	size_t bound_size;
	size_t available_size;
	size_t hole_size;
#else
	int cma_index = 0;
#endif
#else
	unsigned int align_margin;
#endif

	dev->mem_ports = MFC_MAX_MEM_PORT_NUM;
	memset(dev->mem_infos, 0, sizeof(dev->mem_infos));

#ifdef SYSMMU_MFC_ON
#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
	/* One unified VCM covering 256M per memory port. */
	dev->vcm_info.sysmmu_vcm = vcm_create_unified(
			SZ_256M * dev->mem_ports,
			VCM_DEV_MFC,
			&mfc_vcm_driver);

	memcpy(&vcm_info, &dev->vcm_info, sizeof(struct mfc_vcm));

	dev->mem_infos[0].vcm_s = vcm_reserve(dev->vcm_info.sysmmu_vcm,
			MFC_MEMSIZE_PORT_A, 0);
	if (IS_ERR(dev->mem_infos[0].vcm_s))
		return PTR_ERR(dev->mem_infos[0].vcm_s);

	/* Port base must be 128KB aligned; remember the alignment waste. */
	dev->mem_infos[0].base = ALIGN(dev->mem_infos[0].vcm_s->start,
			ALIGN_128KB);
	align_margin = dev->mem_infos[0].base -
			dev->mem_infos[0].vcm_s->start;
	/* FIXME: for offset operation. it's temporary solution */
	/*
	dev->mem_infos[0].size = MFC_MEMSIZE_PORT_A - align_margin;
	*/
	dev->mem_infos[0].size = SZ_256M - align_margin;
	dev->mem_infos[0].addr = NULL;

	/* FIXME: for support user-side allocation. it's temporary solution */
	/* Temporarily plug the gap so port B lands in the next 256M window. */
	if (MFC_MEMSIZE_PORT_A < SZ_256M)
		hole = vcm_reserve(dev->vcm_info.sysmmu_vcm,
				SZ_256M - MFC_MEMSIZE_PORT_A, 0);

	if (dev->mem_ports == 2) {
		dev->mem_infos[1].vcm_s = vcm_reserve(dev->vcm_info.sysmmu_vcm,
				MFC_MEMSIZE_PORT_B, 0);
		if (IS_ERR(dev->mem_infos[1].vcm_s)) {
			vcm_unreserve(dev->mem_infos[0].vcm_s);
			return PTR_ERR(dev->mem_infos[1].vcm_s);
		}
		dev->mem_infos[1].base = ALIGN(dev->mem_infos[1].vcm_s->start,
				ALIGN_128KB);
		align_margin = dev->mem_infos[1].base -
				dev->mem_infos[1].vcm_s->start;
		dev->mem_infos[1].size = MFC_MEMSIZE_PORT_B - align_margin;
		dev->mem_infos[1].addr = NULL;
	}

	/* FIXME: for support user-side allocation. it's temporary solution */
	/* NOTE(review): `hole` may be uninitialized here when
	 * MFC_MEMSIZE_PORT_A >= SZ_256M — confirm the size invariants. */
	vcm_unreserve(hole);

	dev->fw.vcm_s = mfc_vcm_bind(dev->mem_infos[0].base,
			MFC_FW_SYSTEM_SIZE);
	if (IS_ERR(dev->fw.vcm_s))
		return PTR_ERR(dev->fw.vcm_s);

	dev->fw.vcm_k = mfc_vcm_map(dev->fw.vcm_s->res.phys);
	if (IS_ERR(dev->fw.vcm_k)) {
		mfc_vcm_unbind(dev->fw.vcm_s, 0);
		return PTR_ERR(dev->fw.vcm_k);
	}

	/* FIXME: it's very tricky! MUST BE FIX */
	dev->mem_infos[0].addr = (unsigned char *)dev->fw.vcm_k->start;
#elif defined(CONFIG_S5P_VMEM)
	base[0] = MFC_FREEBASE;

	dev->mem_infos[0].base = ALIGN(base[0], ALIGN_128KB);
	align_margin = dev->mem_infos[0].base - base[0];
	dev->mem_infos[0].size = MFC_MEMSIZE_PORT_A - align_margin;
	dev->mem_infos[0].addr = (unsigned char *)dev->mem_infos[0].base;

	if (dev->mem_ports == 2) {
		base[1] = dev->mem_infos[0].base + dev->mem_infos[0].size;
		dev->mem_infos[1].base = ALIGN(base[1], ALIGN_128KB);
		align_margin = dev->mem_infos[1].base - base[1];
		dev->mem_infos[1].size = MFC_MEMSIZE_PORT_B - align_margin;
		dev->mem_infos[1].addr = (unsigned char *)dev->mem_infos[1].base;
	}

	dev->fw.vmem_cookie = s5p_vmem_vmemmap(MFC_FW_SYSTEM_SIZE,
			dev->mem_infos[0].base,
			dev->mem_infos[0].base + MFC_FW_SYSTEM_SIZE);
	if (!dev->fw.vmem_cookie)
		return -ENOMEM;
#else	/* not CONFIG_VIDEO_MFC_VCM_UMP && not CONFIG_S5P_VMEM */
	/* kernel virtual memory allocator */
	dev->mem_infos[0].vmalloc_addr = vmalloc(MFC_MEMSIZE_PORT_A);
	if (dev->mem_infos[0].vmalloc_addr == NULL)
		return -ENOMEM;

	base[0] = (unsigned long)dev->mem_infos[0].vmalloc_addr;
	dev->mem_infos[0].base = ALIGN(base[0], ALIGN_128KB);
	align_margin = dev->mem_infos[0].base - base[0];
	dev->mem_infos[0].size = MFC_MEMSIZE_PORT_A - align_margin;
	dev->mem_infos[0].addr = (unsigned char *)dev->mem_infos[0].base;

	if (dev->mem_ports == 2) {
		dev->mem_infos[1].vmalloc_addr = vmalloc(MFC_MEMSIZE_PORT_B);
		if (dev->mem_infos[1].vmalloc_addr == NULL) {
			vfree(dev->mem_infos[0].vmalloc_addr);
			return -ENOMEM;
		}

		base[1] = (unsigned long)dev->mem_infos[1].vmalloc_addr;
		dev->mem_infos[1].base = ALIGN(base[1], ALIGN_128KB);
		align_margin = dev->mem_infos[1].base - base[1];
		dev->mem_infos[1].size = MFC_MEMSIZE_PORT_B - align_margin;
		dev->mem_infos[1].addr = (unsigned char *)dev->mem_infos[1].base;
	}
#endif	/* end of CONFIG_VIDEO_MFC_VCM_UMP */
#else	/* not SYSMMU_MFC_ON */
	/* early allocator */
#if defined(CONFIG_S5P_MEM_CMA)
#ifdef CONFIG_EXYNOS4_CONTENT_PATH_PROTECTION
	if (cma_info(&cma_infos[0], dev->device, "A")) {
		mfc_info("failed to get CMA info of 'mfc-secure'\n");
		return -ENOMEM;
	}
	if (cma_info(&cma_infos[1], dev->device, "B")) {
		mfc_info("failed to get CMA info of 'mfc-normal'\n");
		return -ENOMEM;
	}
	if (cma_infos[0].lower_bound > cma_infos[1].lower_bound) {
		mfc_info("'mfc-secure' region must be lower than 'mfc-normal' region\n");
		return -ENOMEM;
	}

	/*
	 * available = secure + normal
	 * bound = secure + hole + normal
	 * hole = bound - available
	 */
	available_size = cma_infos[0].free_size + cma_infos[1].free_size;
	bound_size = cma_infos[1].upper_bound - cma_infos[0].lower_bound;
	hole_size = bound_size - available_size;
	mfc_dbg("avail: 0x%08x, bound: 0x%08x offset: 0x%08x, hole: 0x%08x\n",
			available_size, bound_size, MAX_MEM_OFFSET, hole_size);

	/* re-assign actually available size */
	if (bound_size > MAX_MEM_OFFSET) {
		if (cma_infos[0].free_size > MAX_MEM_OFFSET)
			/* it will be return error */
			available_size = MAX_MEM_OFFSET;
		else if ((cma_infos[0].free_size + hole_size) >= MAX_MEM_OFFSET)
			/* it will be return error */
			available_size = cma_infos[0].free_size;
		else
			available_size -= (bound_size - MAX_MEM_OFFSET);
	}
	mfc_dbg("avail: 0x%08x\n", available_size);

	size = cma_infos[0].free_size;
	if (size > available_size) {
		mfc_info("'mfc-secure' region is too large (%d:%d)",
				size >> 10, MAX_MEM_OFFSET >> 10);
		return -ENOMEM;
	}
/*
 * secmem_ioctl - ioctl dispatcher for the secure-memory (secmem) device.
 *
 * Commands: enumerate secure CMA chunks (GET_CHUNK_NUM / CHUNKINFO),
 * resolve an ION fd to its physical address (GET_FD_PHYS_ADDR), toggle and
 * query DRM protection state, crypto-lock stubs, and a MIF frequency lock.
 *
 * Returns 0 on success, -EFAULT on bad user pointers, -EINVAL/-ENOMEM on
 * lookup failures, -ENOTTY for unknown commands.
 */
static long secmem_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	struct secmem_info *info = filp->private_data;
	/* NOTE(review): `nbufs` is function-static, so CHUNKINFO depends on a
	 * prior GET_CHUNK_NUM call by *any* process and is racy across
	 * concurrent callers — confirm intended usage. */
	static int nbufs = 0;

	switch (cmd) {
	case SECMEM_IOC_GET_CHUNK_NUM:
	{
		char **mname;

		/* Count the NULL-terminated secmem_regions name table. */
		nbufs = 0;
		for (mname = secmem_regions; *mname != NULL; mname++)
			nbufs++;

		if (nbufs == 0)
			return -ENOMEM;

		if (copy_to_user((void __user *)arg, &nbufs, sizeof(int)))
			return -EFAULT;
		break;
	}
	case SECMEM_IOC_CHUNKINFO:
	{
		struct cma_info cinfo;
		struct secchunk_info minfo;

		if (copy_from_user(&minfo, (void __user *)arg, sizeof(minfo)))
			return -EFAULT;

		memset(&minfo.name, 0, MAX_NAME_LEN);

		if (minfo.index < 0)
			return -EINVAL;

		if (minfo.index >= nbufs) {
			minfo.index = -1; /* No more memory region */
		} else {
			if (cma_info(&cinfo, info->dev,
				     secmem_regions[minfo.index]))
				return -EINVAL;

			minfo.base = cinfo.lower_bound;
			minfo.size = cinfo.total_size;
			/* NOTE(review): fixed-length memcpy may over-read a
			 * short region-name literal; presumably the names are
			 * padded/short-lived — confirm vs. strlcpy. */
			memcpy(minfo.name, secmem_regions[minfo.index],
			       MAX_NAME_LEN);
		}
		if (copy_to_user((void __user *)arg, &minfo, sizeof(minfo)))
			return -EFAULT;
		break;
	}
#if defined(CONFIG_ION)
	case SECMEM_IOC_GET_FD_PHYS_ADDR:
	{
		struct ion_client *client;
		struct secfd_info fd_info;
		struct ion_fd_data data;
		size_t len;

		if (copy_from_user(&fd_info, (int __user *)arg,
					sizeof(fd_info)))
			return -EFAULT;

		/* Short-lived ION client just for this fd lookup. */
		client = ion_client_create(ion_exynos, "DRM");
		if (IS_ERR(client)) {
			pr_err("%s: Failed to get ion_client of DRM\n",
				__func__);
			return -ENOMEM;
		}

		data.fd = fd_info.fd;
		data.handle = ion_import_dma_buf(client, data.fd);
		pr_debug("%s: fd from user space = %d\n",
				__func__, fd_info.fd);
		if (IS_ERR(data.handle)) {
			pr_err("%s: Failed to get ion_handle of DRM\n",
				__func__);
			ion_client_destroy(client);
			return -ENOMEM;
		}

		if (ion_phys(client, data.handle, &fd_info.phys, &len)) {
			pr_err("%s: Failed to get phys. addr of DRM\n",
				__func__);
			ion_client_destroy(client);
			ion_free(client, data.handle);
			return -ENOMEM;
		}

		pr_debug("%s: physical addr from kernel space = 0x%08x\n",
				__func__, (unsigned int)fd_info.phys);

		ion_free(client, data.handle);
		ion_client_destroy(client);

		if (copy_to_user((void __user *)arg, &fd_info,
					sizeof(fd_info)))
			return -EFAULT;
		break;
	}
#endif
	case SECMEM_IOC_GET_DRM_ONOFF:
		/* Pair with the write barrier in the DRM enable path. */
		smp_rmb();
		if (copy_to_user((void __user *)arg, &drm_onoff, sizeof(int)))
			return -EFAULT;
		break;
	case SECMEM_IOC_SET_DRM_ONOFF:
	{
		int val = 0;

		if (copy_from_user(&val, (int __user *)arg, sizeof(int)))
			return -EFAULT;

		mutex_lock(&drm_lock);
		if ((info->drm_enabled && !val) ||
		    (!info->drm_enabled && val)) {
			/*
			 * 1. if we enabled drm, then disable it
			 * 2. if we don't already hdrm enabled,
			 *    try to enable it.
			 */
			drm_enable_locked(info, val);
		}
		mutex_unlock(&drm_lock);
		break;
	}
	case SECMEM_IOC_GET_CRYPTO_LOCK:
	{
		/* intentionally a no-op stub */
		break;
	}
	case SECMEM_IOC_RELEASE_CRYPTO_LOCK:
	{
		/* intentionally a no-op stub */
		break;
	}
#if defined(CONFIG_ARM_EXYNOS5410_BUS_DEVFREQ)
	case SECMEM_IOC_REQ_MIF_LOCK:
	{
		int req_mif_lock;

		if (copy_from_user(&req_mif_lock, (void __user *)arg,
					sizeof(int)))
			return -EFAULT;

		if (req_mif_lock) {
			/* Hold the memory-interface at 800 MHz. */
			pm_qos_update_request(&exynos5_secmem_mif_qos, 800000);
			pr_debug("%s: Get MIF lock successfully\n", __func__);
		} else {
			pm_qos_update_request(&exynos5_secmem_mif_qos, 0);
			pr_debug("%s: Release MIF lock successfully\n",
				__func__);
		}
		break;
	}
#endif
	default:
		return -ENOTTY;
	}

	return 0;
}