static struct ion_client *vb2_ion_init_ion(struct vb2_ion *ion,
					   struct vb2_drv *drv)
{
	struct ion_client *client;
	int ret;
	int mask = ION_HEAP_EXYNOS_MASK | ION_HEAP_EXYNOS_CONTIG_MASK |
		   ION_HEAP_EXYNOS_USER_MASK;

	client = ion_client_create(ion_exynos, mask, ion->name);
	if (IS_ERR(client)) {
		pr_err("ion_client_create: ion_name(%s)\n", ion->name);
		return ERR_PTR(-EINVAL);
	}

	if (!drv->use_mmu)
		return client;

	ret = iovmm_setup(ion->dev);
	if (ret) {
		pr_err("iovmm_setup: ion_name(%s)\n", ion->name);
		ion_client_destroy(client);
		return ERR_PTR(-EINVAL);
	}

	return client;
}
MemoryHeapIon::MemoryHeapIon(int fd, size_t size, uint32_t flags,
        uint32_t offset) : MemoryHeapBase()
{
    void* base = NULL;
    int dup_fd = -1;

    mIonClient = ion_client_create();

    if (mIonClient < 0) {
        ALOGE("MemoryHeapIon : ION client creation failed : %s", strerror(errno));
        mIonClient = -1;
    } else {
        if (fd >= 0) {
            dup_fd = dup(fd);
            if (dup_fd == -1) {
                ALOGE("MemoryHeapIon : cannot dup fd (size[%u], fd[%d]) : %s",
                        size, fd, strerror(errno));
            } else {
                flags |= USE_ION_FD;
                base = ion_map(dup_fd, size, 0);
                if (base != MAP_FAILED) {
                    init(dup_fd, base, size, flags, NULL);
                } else {
                    ALOGE("MemoryHeapIon : ION mmap failed(size[%u], fd[%d]) : %s",
                            size, fd, strerror(errno));
                    ion_free(dup_fd);
                }
            }
        } else {
            ALOGE("MemoryHeapIon : fd parameter error(fd : %d)", fd);
        }
    }
}
uint32_t hal_tui_alloc(tuiAllocBuffer_t allocbuffer[MAX_DCI_BUFFER_NUMBER],
		       size_t allocsize, uint32_t count)
{
	dma_addr_t buf_addr;
	ion_phys_addr_t phys_addr;
	unsigned long offset = 0;
	unsigned int size;

	size = allocsize * (count + 1);

	client = ion_client_create(ion_exynos, "TUI module");
	if (IS_ERR(client)) {
		pr_err("%s: ion_client_create failed\n", __func__);
		return TUI_DCI_ERR_INTERNAL_ERROR;
	}

	handle = ion_alloc(client, size, 0, EXYNOS_ION_HEAP_EXYNOS_CONTIG_MASK,
			   ION_EXYNOS_VIDEO_MASK);
	dbuf = ion_share_dma_buf(client, handle);
	buf_addr = decon_map_sec_dma_buf(dbuf, 0);
	ion_phys(client, handle, (unsigned long *)&phys_addr, &dbuf->size);

	/* The TUI frame buffer must be aligned to a 16 MiB boundary */
	if (phys_addr % 0x1000000)
		offset = 0x1000000 - (phys_addr % 0x1000000);

	phys_addr += offset;
	va = buf_addr + offset;

	pr_debug("buf_addr : %x\n", va);
	pr_debug("phys_addr : %lx\n", phys_addr);

	g_tuiMemPool.pa = phys_addr;
	g_tuiMemPool.size = allocsize * count;

	if ((size_t)(allocsize * count) <= g_tuiMemPool.size) {
		allocbuffer[0].pa = (uint64_t)g_tuiMemPool.pa;
		allocbuffer[1].pa = (uint64_t)(g_tuiMemPool.pa +
					       g_tuiMemPool.size / 2);
	} else {
		/* The requested buffer is bigger than the memory pool */
		pr_debug("%s(%d): %s\n", __func__, __LINE__,
			 "Memory pool too small");
		return TUI_DCI_ERR_INTERNAL_ERROR;
	}

	return TUI_DCI_OK;
}
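For clarity, the 16 MiB alignment step above can be checked in isolation; a minimal standalone sketch (the helper name align_up_16m is hypothetical, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the driver's alignment logic:
 * round addr up to the next 16 MiB (0x1000000) boundary. */
static uint64_t align_up_16m(uint64_t addr)
{
	uint64_t rem = addr % 0x1000000;
	return rem ? addr + (0x1000000 - rem) : addr;
}

int main(void)
{
	/* 0x20ABC000 rounds up to 0x21000000; an aligned address stays put */
	printf("%llx\n", (unsigned long long)align_up_16m(0x20ABC000ULL));
	printf("%llx\n", (unsigned long long)align_up_16m(0x21000000ULL));
	return 0;
}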
PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData,
					 IMG_PVOID pvOSData)
{
	ENV_CONNECTION_DATA *psEnvConnection;
#if defined(SUPPORT_ION)
	ENV_ION_CONNECTION_DATA *psIonConnection;
#endif

	*phOsPrivateData = OSAllocMem(sizeof(ENV_CONNECTION_DATA));

	if (*phOsPrivateData == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed", __FUNCTION__));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	psEnvConnection = (ENV_CONNECTION_DATA *)*phOsPrivateData;
	OSMemSet(psEnvConnection, 0, sizeof(*psEnvConnection));

	/* Save the pointer to our struct file */
	psEnvConnection->psFile = pvOSData;

#if defined(SUPPORT_ION)
	psIonConnection = (ENV_ION_CONNECTION_DATA *)OSAllocMem(sizeof(ENV_ION_CONNECTION_DATA));
	if (psIonConnection == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed", __FUNCTION__));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	OSMemSet(psIonConnection, 0, sizeof(*psIonConnection));
	psEnvConnection->psIonData = psIonConnection;

	/*
	 * We can have more than one connection per process, so we need
	 * more than the PID to make the name unique.
	 */
	psEnvConnection->psIonData->psIonDev = IonDevAcquire();
	OSSNPrintf(psEnvConnection->psIonData->azIonClientName,
		   ION_CLIENT_NAME_SIZE, "pvr_ion_client-%p-%d",
		   *phOsPrivateData, OSGetCurrentProcessIDKM());
	psEnvConnection->psIonData->psIonClient =
		ion_client_create(psEnvConnection->psIonData->psIonDev,
				  psEnvConnection->psIonData->azIonClientName);

	if (IS_ERR_OR_NULL(psEnvConnection->psIonData->psIonClient))
	{
		PVR_DPF((PVR_DBG_ERROR, "OSConnectionPrivateDataInit: Couldn't create "
					"ion client for per connection data"));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	psEnvConnection->psIonData->ui32IonClientRefCount = 1;
#endif /* SUPPORT_ION */
	return PVRSRV_OK;
}
PVRSRV_ERROR OSPerProcessPrivateDataInit(IMG_HANDLE *phOsPrivateData)
{
	PVRSRV_ERROR eError;
	IMG_HANDLE hBlockAlloc;
	PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;

	eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
			    sizeof(PVRSRV_ENV_PER_PROCESS_DATA),
			    phOsPrivateData,
			    &hBlockAlloc,
			    "Environment per Process Data");
	if (eError != PVRSRV_OK)
	{
		*phOsPrivateData = IMG_NULL;

		PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed (%d)",
			 __FUNCTION__, eError));
		return eError;
	}

	psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)*phOsPrivateData;
	OSMemSet(psEnvPerProc, 0, sizeof(*psEnvPerProc));

	psEnvPerProc->hBlockAlloc = hBlockAlloc;

	/* Linux specific mmap processing */
	LinuxMMapPerProcessConnect(psEnvPerProc);

#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
	/* Linked list of PVRSRV_FILE_PRIVATE_DATA structures */
	INIT_LIST_HEAD(&psEnvPerProc->sDRMAuthListHead);
#endif

#if defined(SUPPORT_ION)
	OSSNPrintf(psEnvPerProc->azIonClientName, ION_CLIENT_NAME_SIZE,
		   "pvr_ion_client-%d", OSGetCurrentProcessIDKM());
	psEnvPerProc->psIONClient =
		ion_client_create(gpsIonDev, psEnvPerProc->azIonClientName);

	if (IS_ERR_OR_NULL(psEnvPerProc->psIONClient))
	{
		PVR_DPF((PVR_DBG_ERROR, "OSPerProcessPrivateDataInit: Couldn't create "
					"ion client for per process data"));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}
#endif /* defined(SUPPORT_ION) */

	return PVRSRV_OK;
}
static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, -1, debug_name);
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}
MemoryHeapIon::MemoryHeapIon(size_t size, uint32_t flags,
        char const *name) : MemoryHeapBase()
{
    void* base = NULL;
    int fd = -1;
    uint32_t isReadOnly, heapMask, flagMask;

    mIonClient = ion_client_create();

    if (mIonClient < 0) {
        ALOGE("MemoryHeapIon : ION client creation failed : %s", strerror(errno));
        mIonClient = -1;
    } else {
        isReadOnly = flags & (IMemoryHeap::READ_ONLY);
        heapMask = ion_HeapMask_valid_check(flags);
        flagMask = ion_FlagMask_valid_check(flags);

        if (heapMask) {
            ALOGD("MemoryHeapIon : Allocated with size:%d, heap:0x%X , flag:0x%X",
                    size, heapMask, flagMask);
            fd = ion_alloc(mIonClient, size, 0, heapMask, flagMask);
            if (fd < 0) {
                ALOGE("MemoryHeapIon : ION Reserve memory allocation failed(size[%u]) : %s",
                        size, strerror(errno));
                if (errno == ENOMEM) {
                    // Out of reserve memory, so retry the allocation in the system heap
                    ALOGD("MemoryHeapIon : Re-try Allocating in default heap - SYSTEM heap");
                    fd = ion_alloc(mIonClient, size, 0, ION_HEAP_SYSTEM_MASK,
                            ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC |
                            ION_FLAG_PRESERVE_KMAP);
                }
            }
        } else {
            fd = ion_alloc(mIonClient, size, 0, ION_HEAP_SYSTEM_MASK,
                    ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC |
                    ION_FLAG_PRESERVE_KMAP);
            ALOGD("MemoryHeapIon : Allocated with default heap - SYSTEM heap");
        }

        flags = isReadOnly | heapMask | flagMask;

        if (fd < 0) {
            ALOGE("MemoryHeapIon : ION memory allocation failed(size[%u]) : %s",
                    size, strerror(errno));
        } else {
            flags |= USE_ION_FD;
            base = ion_map(fd, size, 0);
            if (base != MAP_FAILED) {
                init(fd, base, size, flags, NULL);
            } else {
                ALOGE("MemoryHeapIon : ION mmap failed(size[%u], fd[%d]) : %s",
                        size, fd, strerror(errno));
                ion_free(fd);
            }
        }
    }
}
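For reference, a minimal userspace sketch of the client/alloc/map/free sequence these constructors perform, assuming the same Exynos libion wrappers used above (ion_client_create, ion_alloc, ion_map, ion_free, ion_client_destroy) and their error conventions; alloc_and_map is a hypothetical helper, not part of the original code:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include "ion.h"   /* platform libion header (assumed) */

static int alloc_and_map(size_t size)
{
    int client = ion_client_create();            /* opens the ion device */
    if (client < 0) {
        fprintf(stderr, "client: %s\n", strerror(errno));
        return -1;
    }

    int fd = ion_alloc(client, size, 0, ION_HEAP_SYSTEM_MASK,
                       ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC);
    if (fd < 0) {                                /* allocation returns a buffer fd */
        ion_client_destroy(client);
        return -1;
    }

    void *base = ion_map(fd, size, 0);           /* mmap the buffer fd */
    if (base == MAP_FAILED) {
        ion_free(fd);
        ion_client_destroy(client);
        return -1;
    }

    /* ... use base[0 .. size) ... */

    munmap(base, size);
    ion_free(fd);                                /* drop the buffer fd */
    ion_client_destroy(client);
    return 0;
}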
static int __init omap_imt_init(void)
{
	int r = -EINVAL;

	DBG("omap_imt_init");

	r = register_rpmsg_driver(&rpmsg_driver);

#ifdef CONFIG_ION_OMAP
	imt_ion_client = ion_client_create(omap_ion_device,
			(1 << ION_HEAP_TYPE_CARVEOUT) |
			(1 << OMAP_ION_HEAP_TYPE_TILER),
			"omapimt");
#endif

	return r;
}
void *vb2_ion_create_context(struct device *dev, size_t alignment, long flags)
{
	struct vb2_ion_context *ctx;
	unsigned int heapmask = ion_heapflag(flags);
	struct ion_device *ion_dev = get_global_ion_device();

	if (!ion_dev) {
		pr_err("%s error: can't get global ion device!!!\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * ion_client_create() expects the current thread to be a kernel
	 * thread when it creates a new ion_client.
	 */
	WARN_ON(!(current->group_leader->flags & PF_KTHREAD));

	/* Only physically contiguous memory is supported */
	if (flags & VB2ION_CTX_VMCONTIG) {
		pr_err("%s error: vmalloc ion context is not supported\n",
		       __func__);
		return ERR_PTR(-EINVAL);
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		pr_err("%s error: failed to kzalloc %zu bytes\n",
		       __func__, sizeof(*ctx));
		return ERR_PTR(-ENOMEM);
	}

	ctx->dev = dev;
	ctx->client = ion_client_create(ion_dev, dev_name(dev));
	if (IS_ERR(ctx->client)) {
		void *retp = ctx->client;
		kfree(ctx);
		return retp;
	}

	vb2_ion_set_alignment(ctx, alignment);

	return ctx;
}
static int gralloc_map(gralloc_module_t const* module, buffer_handle_t handle,
        void** vaddr)
{
    private_handle_t* hnd = (private_handle_t*)handle;

    if (!(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)) {
        if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_IOCTL) {
            size_t size = FIMC1_RESERVED_SIZE * 1024;
            void* mappedAddress = mmap(0, size, PROT_READ|PROT_WRITE,
                    MAP_SHARED, gMemfd, (hnd->paddr - hnd->offset));
            if (mappedAddress == MAP_FAILED) {
                ALOGE("Could not mmap %s fd(%d)", strerror(errno), hnd->fd);
                return -errno;
            }
            hnd->base = intptr_t(mappedAddress) + hnd->offset;
        } else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION) {
            size_t size = hnd->size;
            hnd->ion_client = ion_client_create();
            void* mappedAddress = ion_map(hnd->fd, size, 0);
            if (mappedAddress == MAP_FAILED) {
                ALOGE("Could not ion_map %s fd(%d)", strerror(errno), hnd->fd);
                return -errno;
            }
            hnd->base = intptr_t(mappedAddress) + hnd->offset;
        } else {
            size_t size = hnd->size;
#if PMEM_HACK
            size += hnd->offset;
#endif
            void* mappedAddress = mmap(0, size, PROT_READ|PROT_WRITE,
                    MAP_SHARED, hnd->fd, 0);
            if (mappedAddress == MAP_FAILED) {
                ALOGE("Could not mmap %s fd(%d)", strerror(errno), hnd->fd);
                return -errno;
            }
            hnd->base = intptr_t(mappedAddress) + hnd->offset;
        }
    }
    *vaddr = (void*)hnd->base;

    return 0;
}
static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];
	char task_comm[TASK_COMM_LEN];

	pr_debug("%s: %d\n", __func__, __LINE__);
	if (current->group_leader->flags & PF_KTHREAD) {
		snprintf(debug_name, 64, "%u",
			 task_pid_nr(current->group_leader));
	} else {
		strcpy(debug_name,
		       get_task_comm(task_comm, current->group_leader));
	}

	client = ion_client_create(dev, -1, debug_name);
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}
/*
 * IOCTL operation; import an fd to UMP memory
 */
int ump_ion_import_wrapper(u32 __user *argument,
			   struct ump_session_data *session_data)
{
	_ump_uk_ion_import_s user_interaction;
	ump_dd_handle *ump_handle;
	ump_dd_physical_block *blocks;
	unsigned long num_blocks;
	struct ion_handle *ion_hnd;
	struct scatterlist *sg;
	struct scatterlist *sg_ion;
	unsigned long i = 0;
	ump_session_memory_list_element *session_memory_element = NULL;

	if (ion_client_ump == NULL)
		ion_client_ump = ion_client_create(ion_exynos, -1, "ump");

	/* Sanity check input parameters */
	if (NULL == argument || NULL == session_data) {
		MSG_ERR(("NULL parameter in ump_ion_import_wrapper()\n"));
		return -ENOTTY;
	}

	/* Copy the user space memory to kernel space (so we can safely read it) */
	if (0 != copy_from_user(&user_interaction, argument,
				sizeof(user_interaction))) {
		MSG_ERR(("copy_from_user() in ump_ion_import_wrapper()\n"));
		return -EFAULT;
	}

	user_interaction.ctx = (void *)session_data;

	/* Translate the fd to a secure ID */
	ion_hnd = ion_import_fd(ion_client_ump, user_interaction.ion_fd);
	sg_ion = ion_map_dma(ion_client_ump, ion_hnd);

	blocks = (ump_dd_physical_block *)_mali_osk_malloc(sizeof(ump_dd_physical_block) * 1024);
	if (NULL == blocks) {
		MSG_ERR(("Failed to allocate blocks in ump_ion_import_wrapper()\n"));
		return -ENOMEM;
	}

	sg = sg_ion;
	do {
		blocks[i].addr = sg_phys(sg);
		blocks[i].size = sg_dma_len(sg);
		i++;
		if (i >= 1024) {
			_mali_osk_free(blocks);
			MSG_ERR(("ion_import fail() in ump_ion_import_wrapper()\n"));
			return -EFAULT;
		}
		sg = sg_next(sg);
	} while (sg);

	num_blocks = i;

	/* Initialize the session_memory_element and add it to the session object */
	session_memory_element = _mali_osk_calloc(1,
			sizeof(ump_session_memory_list_element));
	if (NULL == session_memory_element) {
		_mali_osk_free(blocks);
		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ion_import_wrapper()\n"));
		return -EFAULT;
	}

	ump_handle = ump_dd_handle_create_from_phys_blocks(blocks, num_blocks);
	if (UMP_DD_HANDLE_INVALID == ump_handle) {
		_mali_osk_free(session_memory_element);
		_mali_osk_free(blocks);
		DBG_MSG(1, ("Failed to create UMP handle in ump_ion_import_wrapper()\n"));
		return -EFAULT;
	}

	session_memory_element->mem = (ump_dd_mem *)ump_handle;
	_mali_osk_mutex_wait(session_data->lock);
	_mali_osk_list_add(&(session_memory_element->list),
			   &(session_data->list_head_session_memory_list));
	_mali_osk_mutex_signal(session_data->lock);

	ion_unmap_dma(ion_client_ump, ion_hnd);
	ion_free(ion_client_ump, ion_hnd);
	_mali_osk_free(blocks);

	user_interaction.secure_id = ump_dd_secure_id_get(ump_handle);
	user_interaction.size = ump_dd_size_get(ump_handle);
	user_interaction.ctx = NULL;

	if (0 != copy_to_user(argument, &user_interaction,
			      sizeof(user_interaction))) {
		/*
		 * If the copy fails then we should release the memory.
		 * We can use the IOCTL release to accomplish this.
		 */
		MSG_ERR(("copy_to_user() failed in ump_ion_import_wrapper()\n"));
		return -EFAULT;
	}

	return 0; /* success */
}
static long tf_device_ioctl(struct file *file, unsigned int ioctl_num,
	unsigned long ioctl_param)
{
	int result = S_SUCCESS;
	struct tf_connection *connection;
	union tf_command command;
	struct tf_command_header header;
	union tf_answer answer;
	u32 command_size;
	u32 answer_size;
	void *user_answer;

	dprintk(KERN_INFO "tf_device_ioctl(%p, %u, %p)\n",
		file, ioctl_num, (void *) ioctl_param);

	switch (ioctl_num) {
	case IOCTL_TF_GET_VERSION:
		/* ioctl is asking for the driver interface version */
		result = TF_DRIVER_INTERFACE_VERSION;
		goto exit;

#ifdef CONFIG_TF_ION
	case IOCTL_TF_ION_REGISTER: {
		int ion_register;

		/* ioctl is asking to register an ion handle */
		if (copy_from_user(&ion_register,
				(int *) ioctl_param, sizeof(int))) {
			dprintk(KERN_ERR "tf_device_ioctl(%p): "
				"copy_from_user failed\n", file);
			result = -EFAULT;
			goto exit;
		}

		connection = tf_conn_from_file(file);
		BUG_ON(connection == NULL);

		/* Initialize ION connection */
		if (connection->ion_client == NULL) {
			connection->ion_client = ion_client_create(
						omap_ion_device,
						(1 << ION_HEAP_TYPE_CARVEOUT),
						"smc");
		}

		if (connection->ion_client == NULL) {
			dprintk(KERN_ERR "tf_device_ioctl(%p): "
				"unable to create ion client\n", file);
			result = -EFAULT;
			goto exit;
		}

		/*
		 * TODO: We should keep a reference count on this handle so
		 * that it cannot be unregistered while still in use.
		 */
		return (long)ion_import_fd(connection->ion_client,
			ion_register);
	}

	case IOCTL_TF_ION_UNREGISTER: {
		int ion_register;

		/* ioctl is asking to unregister an ion handle */
		if (copy_from_user(&ion_register,
				(int *) ioctl_param, sizeof(int))) {
			dprintk(KERN_ERR "tf_device_ioctl(%p): "
				"copy_from_user failed\n", file);
			result = -EFAULT;
			goto exit;
		}

		connection = tf_conn_from_file(file);
		BUG_ON(connection == NULL);

		if (connection->ion_client == NULL) {
			dprintk(KERN_ERR "tf_device_ioctl(%p): "
				"ion client does not exist\n", file);
			result = -EFAULT;
			goto exit;
		}

		ion_free(connection->ion_client,
			(struct ion_handle *) ion_register);

		return S_SUCCESS;
	}
#endif

	case IOCTL_TF_EXCHANGE:
		/*
		 * ioctl is asking to perform a message exchange with the
		 * Secure Module.
		 *
		 * Make a local copy of the data from the user application;
		 * this routine checks that the data is readable.
		 *
		 * Get the header first.
		 */
		if (copy_from_user(&header,
				(struct tf_command_header *)ioctl_param,
				sizeof(struct tf_command_header))) {
			dprintk(KERN_ERR "tf_device_ioctl(%p): "
				"Cannot access ioctl parameter %p\n",
				file, (void *) ioctl_param);
			result = -EFAULT;
			goto exit;
		}

		/* size in words of u32 */
		command_size = header.message_size +
			sizeof(struct tf_command_header)/sizeof(u32);
		if (command_size > sizeof(command)/sizeof(u32)) {
			dprintk(KERN_ERR "tf_device_ioctl(%p): "
				"Buffer overflow: too many bytes to copy %d\n",
				file, command_size);
			result = -EFAULT;
			goto exit;
		}

		if (copy_from_user(&command,
				(union tf_command *)ioctl_param,
				command_size * sizeof(u32))) {
			dprintk(KERN_ERR "tf_device_ioctl(%p): "
				"Cannot access ioctl parameter %p\n",
				file, (void *) ioctl_param);
			result = -EFAULT;
			goto exit;
		}

		connection = tf_conn_from_file(file);
		BUG_ON(connection == NULL);

		/*
		 * The answer memory space address is in the operation_id
		 * field.
		 */
		user_answer = (void *) command.header.operation_id;

		atomic_inc(&(connection->pending_op_count));

		dprintk(KERN_WARNING "tf_device_ioctl(%p): "
			"Sending message type 0x%08x\n",
			file, command.header.message_type);

		switch (command.header.message_type) {
		case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
			result = tf_open_client_session(connection,
				&command, &answer);
			break;

		case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
			result = tf_close_client_session(connection,
				&command, &answer);
			break;

		case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
			result = tf_register_shared_memory(connection,
				&command, &answer);
			break;

		case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
			result = tf_release_shared_memory(connection,
				&command, &answer);
			break;

		case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
			result = tf_invoke_client_command(connection,
				&command, &answer);
			break;

		case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
			result = tf_cancel_client_command(connection,
				&command, &answer);
			break;

		default:
			dprintk(KERN_ERR "tf_device_ioctl(%p): "
				"Incorrect message type (0x%08x)!\n",
				connection, command.header.message_type);
			result = -EOPNOTSUPP;
			break;
		}

		atomic_dec(&(connection->pending_op_count));

		if (result != 0) {
			dprintk(KERN_WARNING "tf_device_ioctl(%p): "
				"Operation returning error code 0x%08x!\n",
				file, result);
			goto exit;
		}

		/*
		 * Copy the answer back to the user space application.
		 * The driver does not check this field; it only copies back
		 * to user space the data handed over by the Secure World.
		 */
		answer_size = answer.header.message_size +
			sizeof(struct tf_answer_header)/sizeof(u32);
		if (copy_to_user(user_answer, &answer,
				answer_size * sizeof(u32))) {
			dprintk(KERN_WARNING "tf_device_ioctl(%p): "
				"Failed to copy back the full command "
				"answer to %p\n", file, user_answer);
			result = -EFAULT;
			goto exit;
		}

		/* successful completion */
		dprintk(KERN_INFO "tf_device_ioctl(%p): Success\n", file);
		break;

	case IOCTL_TF_GET_DESCRIPTION: {
		/* ioctl asking for the version information buffer */
		struct tf_version_information_buffer *pInfoBuffer;

		dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION:(%p, %u, %p)\n",
			file, ioctl_num, (void *) ioctl_param);

		pInfoBuffer =
			((struct tf_version_information_buffer *) ioctl_param);

		dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION1: "
			"driver_description=\"%64s\"\n", S_VERSION_STRING);

		if (copy_to_user(pInfoBuffer->driver_description,
				S_VERSION_STRING,
				strlen(S_VERSION_STRING) + 1)) {
			dprintk(KERN_ERR "tf_device_ioctl(%p): "
				"Failed to copy back the driver description "
				"to %p\n",
				file, pInfoBuffer->driver_description);
			result = -EFAULT;
			goto exit;
		}

		dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION2: "
			"secure_world_description=\"%64s\"\n",
			tf_get_description(&g_tf_dev.sm));

		if (copy_to_user(pInfoBuffer->secure_world_description,
				tf_get_description(&g_tf_dev.sm),
				TF_DESCRIPTION_BUFFER_LENGTH)) {
			dprintk(KERN_WARNING "tf_device_ioctl(%p): "
				"Failed to copy back the secure world "
				"description to %p\n",
				file, pInfoBuffer->secure_world_description);
			result = -EFAULT;
			goto exit;
		}
		break;
	}

	default:
		dprintk(KERN_ERR "tf_device_ioctl(%p): "
			"Unknown IOCTL code 0x%08x!\n",
			file, ioctl_num);
		result = -EOPNOTSUPP;
		goto exit;
	}

exit:
	return result;
}
static long secmem_ioctl(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	struct secmem_info *info = filp->private_data;
	static int nbufs = 0;

	switch (cmd) {
	case SECMEM_IOC_GET_CHUNK_NUM:
	{
		nbufs = sizeof(secmem_regions) / sizeof(uint32_t);

		if (nbufs == 0)
			return -ENOMEM;

		if (copy_to_user((void __user *)arg, &nbufs, sizeof(int)))
			return -EFAULT;
		break;
	}
	case SECMEM_IOC_CHUNKINFO:
	{
		struct secchunk_info minfo;

		if (copy_from_user(&minfo, (void __user *)arg, sizeof(minfo)))
			return -EFAULT;

		memset(&minfo.name, 0, MAX_NAME_LEN);

		if (minfo.index < 0)
			return -EINVAL;

		if (minfo.index >= nbufs) {
			minfo.index = -1; /* No more memory region */
		} else {
			if (ion_exynos_contig_heap_info(
					secmem_regions[minfo.index],
					&minfo.base, &minfo.size))
				return -EINVAL;

			memcpy(minfo.name, secmem_regions_name[minfo.index],
			       MAX_NAME_LEN);
		}

		if (copy_to_user((void __user *)arg, &minfo, sizeof(minfo)))
			return -EFAULT;
		break;
	}
#if defined(CONFIG_ION)
	case SECMEM_IOC_GET_FD_PHYS_ADDR:
	{
		struct ion_client *client;
		struct secfd_info fd_info;
		struct ion_fd_data data;
		size_t len;

		if (copy_from_user(&fd_info, (int __user *)arg,
				   sizeof(fd_info)))
			return -EFAULT;

		client = ion_client_create(ion_exynos, "DRM");
		if (IS_ERR(client)) {
			pr_err("%s: Failed to get ion_client of DRM\n",
			       __func__);
			return -ENOMEM;
		}

		data.fd = fd_info.fd;
		data.handle = ion_import_dma_buf(client, data.fd);
		pr_debug("%s: fd from user space = %d\n",
			 __func__, fd_info.fd);
		if (IS_ERR(data.handle)) {
			pr_err("%s: Failed to get ion_handle of DRM\n",
			       __func__);
			ion_client_destroy(client);
			return -ENOMEM;
		}

		if (ion_phys(client, data.handle, &fd_info.phys, &len)) {
			pr_err("%s: Failed to get phys. addr of DRM\n",
			       __func__);
			ion_client_destroy(client);
			ion_free(client, data.handle);
			return -ENOMEM;
		}

		pr_debug("%s: physical addr from kernel space = 0x%08x\n",
			 __func__, (unsigned int)fd_info.phys);

		ion_free(client, data.handle);
		ion_client_destroy(client);

		if (copy_to_user((void __user *)arg, &fd_info,
				 sizeof(fd_info)))
			return -EFAULT;
		break;
	}
#endif
	case SECMEM_IOC_GET_DRM_ONOFF:
		smp_rmb();
		if (copy_to_user((void __user *)arg, &drm_onoff, sizeof(int)))
			return -EFAULT;
		break;
	case SECMEM_IOC_SET_DRM_ONOFF:
	{
		int ret, val = 0;

		if (copy_from_user(&val, (int __user *)arg, sizeof(int)))
			return -EFAULT;

		mutex_lock(&drm_lock);
		if ((info->drm_enabled && !val) ||
		    (!info->drm_enabled && val)) {
			/*
			 * 1. if drm is enabled, disable it
			 * 2. if drm is not yet enabled, try to enable it
			 */
			ret = drm_enable_locked(info, val);
			if (ret < 0)
				pr_err("fail to lock/unlock drm status. lock = %d\n",
				       val);
		}
		mutex_unlock(&drm_lock);
		break;
	}
	case SECMEM_IOC_GET_CRYPTO_LOCK:
	{
		break;
	}
	case SECMEM_IOC_RELEASE_CRYPTO_LOCK:
	{
		break;
	}
	case SECMEM_IOC_SET_TZPC:
	{
#if !defined(CONFIG_SOC_EXYNOS5422) && !defined(CONFIG_SOC_EXYNOS5430)
		struct protect_info prot;

		if (copy_from_user(&prot, (void __user *)arg,
				   sizeof(struct protect_info)))
			return -EFAULT;

		mutex_lock(&smc_lock);
		exynos_smc((uint32_t)(0x81000000), 0, prot.dev, prot.enable);
		mutex_unlock(&smc_lock);
#endif
		break;
	}
	default:
		return -ENOTTY;
	}

	return 0;
}
static int exynos_secure_mem_enable(struct kbase_device *kbdev, int ion_fd,
				    u64 flags, struct kbase_va_region *reg)
{
	/* enable secure world mode : TZASC */
	int ret = 0;

	if (!kbdev)
		goto secure_out;

	if (!kbdev->secure_mode_support) {
		GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u,
			"%s: wrong operation! DDK cannot support Secure Rendering\n",
			__func__);
		ret = -EINVAL;
		goto secure_out;
	}

	if (!reg) {
		GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u,
			"%s: wrong input argument, reg %p\n",
			__func__, reg);
		goto secure_out;
	}

#if defined(CONFIG_ION) && defined(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION)
#if MALI_SEC_ASP_SECURE_BUF_CTRL
	{
		struct ion_client *client;
		struct ion_handle *ion_handle;
		size_t len = 0;
		ion_phys_addr_t phys = 0;

		flush_all_cpu_caches();

		if ((flags & kbdev->sec_sr_info.secure_flags_crc_asp) ==
		    kbdev->sec_sr_info.secure_flags_crc_asp) {
			reg->flags |= KBASE_REG_SECURE_CRC | KBASE_REG_SECURE;
		} else {
			client = ion_client_create(ion_exynos, "G3D");
			if (IS_ERR(client)) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u,
					"%s: Failed to get ion_client of G3D\n",
					__func__);
				goto secure_out;
			}

			ion_handle = ion_import_dma_buf(client, ion_fd);
			if (IS_ERR(ion_handle)) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u,
					"%s: Failed to get ion_handle of G3D\n",
					__func__);
				ion_client_destroy(client);
				goto secure_out;
			}

			if (ion_phys(client, ion_handle, &phys, &len)) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u,
					"%s: Failed to get phys. addr of G3D\n",
					__func__);
				ion_free(client, ion_handle);
				ion_client_destroy(client);
				goto secure_out;
			}

			ion_free(client, ion_handle);
			ion_client_destroy(client);

			ret = exynos_smc(SMC_DRM_SECBUF_CFW_PROT,
					 phys, len, PROT_G3D);
			if (ret != DRMDRV_OK) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u,
					"%s: failed to set secure buffer region of G3D buffer, phy 0x%08x, error 0x%x\n",
					__func__, (unsigned int)phys, ret);
				BUG();
			}

			reg->flags |= KBASE_REG_SECURE;
		}

		reg->phys_by_ion = phys;
		reg->len_by_ion = len;
	}
#else
	reg->flags |= KBASE_REG_SECURE;
	reg->phys_by_ion = 0;
	reg->len_by_ion = 0;
#endif
#else
	GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u,
		"%s: wrong operation! DDK cannot support Secure Rendering\n",
		__func__);
	ret = -EINVAL;
#endif /* defined(CONFIG_ION) && defined(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION) */

	return ret;

secure_out:
	ret = -EINVAL;
	return ret;
}
static int __devinit gpu_probe(struct platform_device *pdev)
{
	int ret = -ENODEV;
	struct resource *res;

	gcmkHEADER();

	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "gpu_irq");
	if (!res) {
		printk(KERN_ERR "%s: No irq line supplied.\n", __FUNCTION__);
		goto gpu_probe_fail;
	}
	irqLine = res->start;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpu_base");
	if (!res) {
		printk(KERN_ERR "%s: No register base supplied.\n", __FUNCTION__);
		goto gpu_probe_fail;
	}
	registerMemBase = res->start;
	registerMemSize = res->end - res->start + 1;

#if MRVL_USE_GPU_RESERVE_MEM
	gcmkPRINT(KERN_INFO "[galcore] info: GC use memblock to reserve video memory.\n");
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpu_mem");
	if (!res) {
		printk(KERN_ERR "%s: No gpu reserved memory supplied. res = %p\n",
		       __FUNCTION__, res);
		goto gpu_probe_fail;
	}
	contiguousBase = res->start;
	contiguousSize = res->end - res->start + 1;
#endif

	printk("\n[galcore] GC Version: %s\n", _GC_VERSION_STRING_);
	printk("\ncontiguousBase:%08x, contiguousSize:%08x\n",
	       (gctUINT32)contiguousBase, (gctUINT32)contiguousSize);

	pdevice = &pdev->dev;
	ret = drv_init();

	if (!ret) {
		platform_set_drvdata(pdev, galDevice);

#if MRVL_CONFIG_PROC
		create_gc_proc_file();
#endif
		create_gc_sysfs_file(pdev);

#if MRVL_CONFIG_ENABLE_GPUFREQ
		__enable_gpufreq(galDevice);
#endif

#if MRVL_CONFIG_ENABLE_EARLYSUSPEND
		register_early_suspend(&gpu_early_suspend_handler);
#endif

#if (MRVL_VIDEO_MEMORY_USE_TYPE == gcdMEM_TYPE_ION)
#if (gcdMEM_TYPE_IONAF_3_4_39 == 1)
		gc_ion_client = ion_client_create(pxa_ion_dev, "gc ion");
#else
		gc_ion_client = ion_client_create(pxa_ion_dev,
						  ION_HEAP_CARVEOUT_MASK,
						  "gc ion");
#endif
#endif

#if MRVL_CONFIG_USE_PM_RUNTIME
		pm_runtime_enable(&pdev->dev);
		pm_runtime_forbid(&pdev->dev);
#endif

		/* save device pointer to GALDEVICE */
		galDevice->dev = &pdev->dev;

		gcmkFOOTER_NO();
		return ret;
	}

gpu_probe_fail:
	gcmkFOOTER_ARG(KERN_INFO "Failed to register gpu driver: %d\n", ret);
	return ret;
}
struct ion_client *hisi_ion_client_create(const char *name)
{
	return ion_client_create(idev, name);
}
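Several kernel snippets above share one pattern: create a client, import a dma-buf fd, query the physical address, then release everything. A hedged sketch of that pattern, assuming the two-argument ion_client_create() used here; lookup_phys and the "example" client name are illustrative, not from any of the drivers above:

/* Kernel-side sketch: fd -> physical address, with full cleanup. */
static int lookup_phys(struct ion_device *dev, int fd,
		       ion_phys_addr_t *phys, size_t *len)
{
	struct ion_client *client;
	struct ion_handle *handle;
	int ret = 0;

	client = ion_client_create(dev, "example");
	if (IS_ERR(client))
		return PTR_ERR(client);

	handle = ion_import_dma_buf(client, fd);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_client;
	}

	if (ion_phys(client, handle, phys, len))
		ret = -EINVAL;

	ion_free(client, handle);
out_client:
	ion_client_destroy(client);
	return ret;
}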
void BpMemoryHeap::assertReallyMapped() const
{
    if (mHeapId == -1) {

        // remote call without mLock held: worst case, we end up calling
        // transact() from multiple threads, but that's not a problem,
        // only the mmap below must be in the critical section.

        Parcel data, reply;
        data.writeInterfaceToken(IMemoryHeap::getInterfaceDescriptor());
        status_t err = remote()->transact(HEAP_ID, data, &reply);
        int parcel_fd = reply.readFileDescriptor();
        ssize_t size = reply.readInt32();
        uint32_t flags = reply.readInt32();
        uint32_t offset = reply.readInt32();

        ALOGE_IF(err, "binder=%p transaction failed fd=%d, size=%ld, err=%d (%s)",
                asBinder().get(), parcel_fd, size, err, strerror(-err));

#ifdef USE_V4L2_ION
        int ion_client = -1;
        if (flags & USE_ION_FD) {
            ion_client = ion_client_create();
            ALOGE_IF(ion_client < 0, "BpMemoryHeap : ion client creation error");
        }
#endif

        int fd = dup(parcel_fd);
        ALOGE_IF(fd == -1, "cannot dup fd=%d, size=%ld, err=%d (%s)",
                parcel_fd, size, err, strerror(errno));

        int access = PROT_READ;
        if (!(flags & READ_ONLY)) {
            access |= PROT_WRITE;
        }

        Mutex::Autolock _l(mLock);
        if (mHeapId == -1) {
            mRealHeap = true;

#ifdef USE_V4L2_ION
            if (flags & USE_ION_FD) {
                if (ion_client < 0)
                    mBase = MAP_FAILED;
                else
                    mBase = ion_map(fd, size, offset);
            } else
#endif
                mBase = mmap(0, size, access, MAP_SHARED, fd, offset);
            if (mBase == MAP_FAILED) {
                ALOGE("cannot map BpMemoryHeap (binder=%p), size=%ld, fd=%d (%s)",
                        asBinder().get(), size, fd, strerror(errno));
                close(fd);
            } else {
                mSize = size;
                mFlags = flags;
                mOffset = offset;
                android_atomic_write(fd, &mHeapId);
            }
        }

#ifdef USE_V4L2_ION
        if (ion_client < 0)
            ion_client = -1;
        else
            ion_client_destroy(ion_client);
#endif
    }
}
/*
 * Invokes a client command to the Secure World
 */
int tf_invoke_client_command(
	struct tf_connection *connection,
	union tf_command *command,
	union tf_answer *answer)
{
	int error = 0;
	struct tf_shmem_desc *shmem_desc[4] = {NULL};
	int i;
#ifdef CONFIG_TF_ION
	struct ion_handle *new_handle = NULL;
#endif /* CONFIG_TF_ION */

	dprintk(KERN_INFO "tf_invoke_client_command(%p)\n", connection);

	command->release_shared_memory.message_size =
		(sizeof(struct tf_command_invoke_client_command) -
		 sizeof(struct tf_command_header)) / 4;

#ifdef CONFIG_TF_ZEBRA
	error = tf_crypto_try_shortcuted_update(connection,
		(struct tf_command_invoke_client_command *) command,
		(struct tf_answer_invoke_client_command *) answer);
	if (error == 0)
		return error;
#endif

	/* Map the tmprefs */
	for (i = 0; i < 4; i++) {
		int param_type = TF_GET_PARAM_TYPE(
			command->invoke_client_command.param_types, i);

		if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG |
				   TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
				== TF_PARAM_TYPE_MEMREF_FLAG) {
			/* A temporary memref: map it */
			error = tf_map_temp_shmem(connection,
					&command->invoke_client_command.
						params[i].temp_memref,
					param_type, &shmem_desc[i]);
			if (error != 0) {
				dprintk(KERN_ERR
					"tf_invoke_client_command: "
					"unable to map temporary memory "
					"block (%08X)\n", error);
				goto error;
			}
		}
#ifdef CONFIG_TF_ION
		else if (param_type == TF_PARAM_TYPE_MEMREF_ION_HANDLE) {
			struct tf_command_invoke_client_command *invoke;
			ion_phys_addr_t ion_addr;
			size_t ion_len;
			struct ion_buffer *buffer;

			if (connection->ion_client == NULL) {
				connection->ion_client = ion_client_create(
					zebra_ion_device,
					(1 << ION_HEAP_TYPE_CARVEOUT),
					"tf");
			}
			if (connection->ion_client == NULL) {
				dprintk(KERN_ERR "%s(%p): "
					"unable to create ion client\n",
					__func__, connection);
				error = -EFAULT;
				goto error;
			}

			invoke = &command->invoke_client_command;

			dprintk(KERN_INFO "ion_handle %x",
				invoke->params[i].value.a);
			buffer = ion_share(connection->ion_client,
				(struct ion_handle *)invoke->params[i].value.a);
			if (buffer == NULL) {
				dprintk(KERN_ERR "%s(%p): "
					"unable to share ion handle\n",
					__func__, connection);
				error = -EFAULT;
				goto error;
			}

			dprintk(KERN_INFO "ion_buffer %p", buffer);
			new_handle = ion_import(connection->ion_client, buffer);
			if (new_handle == NULL) {
				dprintk(KERN_ERR "%s(%p): "
					"unable to import ion buffer\n",
					__func__, connection);
				error = -EFAULT;
				goto error;
			}

			dprintk(KERN_INFO "new_handle %x", new_handle);
			error = ion_phys(connection->ion_client,
				new_handle, &ion_addr, &ion_len);
			if (error) {
				dprintk(KERN_ERR
					"%s: unable to convert ion handle "
					"0x%08X (error code 0x%08X)\n",
					__func__, new_handle, error);
				error = -EINVAL;
				goto error;
			}

			dprintk(KERN_INFO
				"%s: handle=0x%08x phys_add=0x%08x length=0x%08x\n",
				__func__, invoke->params[i].value.a,
				ion_addr, ion_len);

			invoke->params[i].value.a = (u32) ion_addr;
			invoke->params[i].value.b = (u32) ion_len;

			invoke->param_types &= ~((0xF) << (4*i));
			invoke->param_types |=
				TF_PARAM_TYPE_VALUE_INPUT << (4*i);
		}
#endif /* CONFIG_TF_ION */
	}

	command->invoke_client_command.device_context =
		connection->device_context;

	error = tf_send_receive(&connection->dev->sm, command, answer,
		connection, true);

error:
#ifdef CONFIG_TF_ION
	if (new_handle != NULL)
		ion_free(connection->ion_client, new_handle);
#endif /* CONFIG_TF_ION */

	/* Unmap the temporary memrefs */
	for (i = 0; i < 4; i++) {
		if (shmem_desc[i] != NULL) {
			dprintk(KERN_INFO "tf_invoke_client_command: "
				"unmap temp_memref %d\n", i);
			tf_unmap_shmem(connection, shmem_desc[i], 0);
		}
	}

	if (error != 0)
		dprintk(KERN_ERR "tf_invoke_client_command returns %d\n",
			error);
	else
		dprintk(KERN_ERR "tf_invoke_client_command returns "
			"error_code 0x%08X\n",
			answer->invoke_client_command.error_code);

	return error;
}
static long secmem_ioctl(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	struct secmem_info *info = filp->private_data;
	static int nbufs = 0;

	switch (cmd) {
	case SECMEM_IOC_GET_CHUNK_NUM:
	{
		char **mname;

		nbufs = 0;
		for (mname = secmem_regions; *mname != NULL; mname++)
			nbufs++;

		if (nbufs == 0)
			return -ENOMEM;

		if (copy_to_user((void __user *)arg, &nbufs, sizeof(int)))
			return -EFAULT;
		break;
	}
	case SECMEM_IOC_CHUNKINFO:
	{
		struct cma_info cinfo;
		struct secchunk_info minfo;

		if (copy_from_user(&minfo, (void __user *)arg, sizeof(minfo)))
			return -EFAULT;

		memset(&minfo.name, 0, MAX_NAME_LEN);

		if (minfo.index < 0)
			return -EINVAL;

		if (minfo.index >= nbufs) {
			minfo.index = -1; /* No more memory region */
		} else {
			if (cma_info(&cinfo, info->dev,
				     secmem_regions[minfo.index]))
				return -EINVAL;

			minfo.base = cinfo.lower_bound;
			minfo.size = cinfo.total_size;
			memcpy(minfo.name, secmem_regions[minfo.index],
			       MAX_NAME_LEN);
		}

		if (copy_to_user((void __user *)arg, &minfo, sizeof(minfo)))
			return -EFAULT;
		break;
	}
#if defined(CONFIG_ION)
	case SECMEM_IOC_GET_FD_PHYS_ADDR:
	{
		struct ion_client *client;
		struct secfd_info fd_info;
		struct ion_fd_data data;
		size_t len;

		if (copy_from_user(&fd_info, (int __user *)arg,
				   sizeof(fd_info)))
			return -EFAULT;

		client = ion_client_create(ion_exynos, "DRM");
		if (IS_ERR(client)) {
			pr_err("%s: Failed to get ion_client of DRM\n",
			       __func__);
			return -ENOMEM;
		}

		data.fd = fd_info.fd;
		data.handle = ion_import_dma_buf(client, data.fd);
		pr_debug("%s: fd from user space = %d\n",
			 __func__, fd_info.fd);
		if (IS_ERR(data.handle)) {
			pr_err("%s: Failed to get ion_handle of DRM\n",
			       __func__);
			ion_client_destroy(client);
			return -ENOMEM;
		}

		if (ion_phys(client, data.handle, &fd_info.phys, &len)) {
			pr_err("%s: Failed to get phys. addr of DRM\n",
			       __func__);
			ion_client_destroy(client);
			ion_free(client, data.handle);
			return -ENOMEM;
		}

		pr_debug("%s: physical addr from kernel space = 0x%08x\n",
			 __func__, (unsigned int)fd_info.phys);

		ion_free(client, data.handle);
		ion_client_destroy(client);

		if (copy_to_user((void __user *)arg, &fd_info,
				 sizeof(fd_info)))
			return -EFAULT;
		break;
	}
#endif
	case SECMEM_IOC_GET_DRM_ONOFF:
		smp_rmb();
		if (copy_to_user((void __user *)arg, &drm_onoff, sizeof(int)))
			return -EFAULT;
		break;
	case SECMEM_IOC_SET_DRM_ONOFF:
	{
		int val = 0;

		if (copy_from_user(&val, (int __user *)arg, sizeof(int)))
			return -EFAULT;

		mutex_lock(&drm_lock);
		if ((info->drm_enabled && !val) ||
		    (!info->drm_enabled && val)) {
			/*
			 * 1. if drm is enabled, disable it
			 * 2. if drm is not yet enabled, try to enable it
			 */
			drm_enable_locked(info, val);
		}
		mutex_unlock(&drm_lock);
		break;
	}
	case SECMEM_IOC_GET_CRYPTO_LOCK:
	{
		break;
	}
	case SECMEM_IOC_RELEASE_CRYPTO_LOCK:
	{
		break;
	}
#if defined(CONFIG_ARM_EXYNOS5410_BUS_DEVFREQ)
	case SECMEM_IOC_REQ_MIF_LOCK:
	{
		int req_mif_lock;

		if (copy_from_user(&req_mif_lock, (void __user *)arg,
				   sizeof(int)))
			return -EFAULT;

		if (req_mif_lock) {
			pm_qos_update_request(&exynos5_secmem_mif_qos, 800000);
			pr_debug("%s: Get MIF lock successfully\n", __func__);
		} else {
			pm_qos_update_request(&exynos5_secmem_mif_qos, 0);
			pr_debug("%s: Release MIF lock successfully\n",
				 __func__);
		}
		break;
	}
#endif
	default:
		return -ENOTTY;
	}

	return 0;
}
struct ion_client *msm_ion_client_create(unsigned int heap_mask,
					 const char *name)
{
	return ion_client_create(idev, heap_mask, name);
}
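A hedged caller sketch for the wrapper above; the heap mask, SZ_4K size, and "example" name are illustrative only, and example_alloc is a hypothetical function, not part of the msm code:

/* Allocate and immediately free one page through the wrapper above;
 * -1 requests any heap, as in the ion_open() snippets earlier. */
static int example_alloc(void)
{
	struct ion_client *client;
	struct ion_handle *handle;

	client = msm_ion_client_create(-1, "example");
	if (IS_ERR_OR_NULL(client))
		return -ENOMEM;

	handle = ion_alloc(client, SZ_4K, 0, ION_HEAP_SYSTEM_MASK, 0);
	if (IS_ERR(handle)) {
		ion_client_destroy(client);
		return PTR_ERR(handle);
	}

	ion_free(client, handle);
	ion_client_destroy(client);
	return 0;
}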
static int fimc_is_probe(struct platform_device *pdev)
{
	struct exynos_platform_fimc_is *pdata;
#if defined(ENABLE_IS_CORE) || defined(USE_MCUCTL)
	struct resource *mem_res;
	struct resource *regs_res;
#endif
	struct fimc_is_core *core;
	int ret = -ENODEV;
#ifndef ENABLE_IS_CORE
	int i;
#endif
	u32 stream;
	struct pinctrl_state *s;

	probe_info("%s:start(%ld, %ld)\n", __func__,
		sizeof(struct fimc_is_core), sizeof(struct fimc_is_video_ctx));

	core = kzalloc(sizeof(struct fimc_is_core), GFP_KERNEL);
	if (!core) {
		probe_err("core is NULL");
		return -ENOMEM;
	}

	fimc_is_dev = &pdev->dev;
	dev_set_drvdata(fimc_is_dev, core);

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
#ifdef CONFIG_OF
		ret = fimc_is_parse_dt(pdev);
		if (ret) {
			err("fimc_is_parse_dt is fail(%d)", ret);
			return ret;
		}

		pdata = dev_get_platdata(&pdev->dev);
#else
		BUG();
#endif
	}

#ifdef USE_ION_ALLOC
	core->fimc_ion_client = ion_client_create(ion_exynos, "fimc-is");
#endif
	core->pdev = pdev;
	core->pdata = pdata;
	core->current_position = SENSOR_POSITION_REAR;
	device_init_wakeup(&pdev->dev, true);

	/* for mediaserver force down */
	atomic_set(&core->rsccount, 0);

#if defined(ENABLE_IS_CORE) || defined(USE_MCUCTL)
	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem_res) {
		probe_err("Failed to get io memory region(%p)", mem_res);
		goto p_err1;
	}

	regs_res = request_mem_region(mem_res->start, resource_size(mem_res),
				      pdev->name);
	if (!regs_res) {
		probe_err("Failed to request io memory region(%p)", regs_res);
		goto p_err1;
	}

	core->regs_res = regs_res;
	core->regs = ioremap_nocache(mem_res->start, resource_size(mem_res));
	if (!core->regs) {
		probe_err("Failed to remap io region(%p)", core->regs);
		goto p_err2;
	}
#else
	core->regs_res = NULL;
	core->regs = NULL;
#endif

#ifdef ENABLE_IS_CORE
	core->irq = platform_get_irq(pdev, 0);
	if (core->irq < 0) {
		probe_err("Failed to get irq(%d)", core->irq);
		goto p_err3;
	}
#endif

	ret = pdata->clk_get(&pdev->dev);
	if (ret) {
		probe_err("clk_get is fail(%d)", ret);
		goto p_err3;
	}

	ret = fimc_is_mem_probe(&core->resourcemgr.mem, core->pdev);
	if (ret) {
		probe_err("fimc_is_mem_probe is fail(%d)", ret);
		goto p_err3;
	}

	ret = fimc_is_resourcemgr_probe(&core->resourcemgr, core);
	if (ret) {
		probe_err("fimc_is_resourcemgr_probe is fail(%d)", ret);
		goto p_err3;
	}

	ret = fimc_is_interface_probe(&core->interface,
		&core->resourcemgr.minfo,
		(ulong)core->regs,
		core->irq,
		core);
	if (ret) {
		probe_err("fimc_is_interface_probe is fail(%d)", ret);
		goto p_err3;
	}

	ret = fimc_is_debug_probe();
	if (ret) {
		probe_err("fimc_is_debug_probe is fail(%d)", ret);
		goto p_err3;
	}

	ret = fimc_is_vender_probe(&core->vender);
	if (ret) {
		probe_err("fimc_is_vender_probe is fail(%d)", ret);
		goto p_err3;
	}

	/* group initialization */
	ret = fimc_is_groupmgr_probe(&core->groupmgr);
	if (ret) {
		probe_err("fimc_is_groupmgr_probe is fail(%d)", ret);
		goto p_err3;
	}

	for (stream = 0; stream < FIMC_IS_STREAM_COUNT; ++stream) {
		ret = fimc_is_ischain_probe(&core->ischain[stream],
			&core->interface,
			&core->resourcemgr,
			&core->groupmgr,
			&core->resourcemgr.mem,
			core->pdev,
			stream);
		if (ret) {
			probe_err("fimc_is_ischain_probe(%d) is fail(%d)",
				stream, ret);
			goto p_err3;
		}

#ifndef ENABLE_IS_CORE
		core->ischain[stream].hardware = &core->hardware;
#endif
	}

	ret = v4l2_device_register(&pdev->dev, &core->v4l2_dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register fimc-is v4l2 device\n");
		goto p_err3;
	}

#ifdef SOC_30S
	/* video entity - 3a0 */
	fimc_is_30s_video_probe(core);
#endif

#ifdef SOC_30C
	/* video entity - 3a0 capture */
	fimc_is_30c_video_probe(core);
#endif

#ifdef SOC_30P
	/* video entity - 3a0 preview */
	fimc_is_30p_video_probe(core);
#endif

#ifdef SOC_31S
	/* video entity - 3a1 */
	fimc_is_31s_video_probe(core);
#endif

#ifdef SOC_31C
	/* video entity - 3a1 capture */
	fimc_is_31c_video_probe(core);
#endif

#ifdef SOC_31P
	/* video entity - 3a1 preview */
	fimc_is_31p_video_probe(core);
#endif

#ifdef SOC_I0S
	/* video entity - isp0 */
	fimc_is_i0s_video_probe(core);
#endif

#ifdef SOC_I0C
	/* video entity - isp0 capture */
	fimc_is_i0c_video_probe(core);
#endif

#ifdef SOC_I0P
	/* video entity - isp0 preview */
	fimc_is_i0p_video_probe(core);
#endif

#ifdef SOC_I1S
	/* video entity - isp1 */
	fimc_is_i1s_video_probe(core);
#endif

#ifdef SOC_I1C
	/* video entity - isp1 capture */
	fimc_is_i1c_video_probe(core);
#endif

#ifdef SOC_I1P
	/* video entity - isp1 preview */
	fimc_is_i1p_video_probe(core);
#endif

#ifdef SOC_DIS
	/* video entity - dis */
	fimc_is_dis_video_probe(core);
#endif

#ifdef SOC_SCC
	/* video entity - scc */
	fimc_is_scc_video_probe(core);
#endif

#ifdef SOC_SCP
	/* video entity - scp */
	fimc_is_scp_video_probe(core);
#endif

#ifdef SOC_MCS
	/* video entity - mcs */
	fimc_is_m0s_video_probe(core);
	fimc_is_m1s_video_probe(core);
	fimc_is_m0p_video_probe(core);
	fimc_is_m1p_video_probe(core);
	fimc_is_m2p_video_probe(core);
	fimc_is_m3p_video_probe(core);
	fimc_is_m4p_video_probe(core);
#endif

	platform_set_drvdata(pdev, core);

#ifndef ENABLE_IS_CORE
	ret = fimc_is_interface_ischain_probe(&core->interface_ischain,
		&core->hardware,
		&core->resourcemgr,
		core->pdev,
		(ulong)core->regs);
	if (ret) {
		dev_err(&pdev->dev, "interface_ischain_probe fail\n");
		goto p_err1;
	}

	ret = fimc_is_hardware_probe(&core->hardware, &core->interface,
		&core->interface_ischain);
	if (ret) {
		dev_err(&pdev->dev, "hardware_probe fail\n");
		goto p_err1;
	}

	/* set sysfs for setting the actuator position */
	sysfs_actuator.init_step = 0;
	for (i = 0; i < INIT_MAX_SETTING; i++) {
		sysfs_actuator.init_positions[i] = -1;
		sysfs_actuator.init_delays[i] = -1;
	}
#endif

#if defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5433)
#if defined(CONFIG_VIDEOBUF2_ION)
	if (core->resourcemgr.mem.alloc_ctx)
		vb2_ion_attach_iommu(core->resourcemgr.mem.alloc_ctx);
#endif
#endif

	EXYNOS_MIF_ADD_NOTIFIER(&exynos_fimc_is_mif_throttling_nb);

#if defined(CONFIG_PM_RUNTIME)
	pm_runtime_enable(&pdev->dev);
#endif

#ifdef ENABLE_FAULT_HANDLER
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
	exynos_sysmmu_set_fault_handler(fimc_is_dev, fimc_is_fault_handler);
#else
	iovmm_set_fault_handler(fimc_is_dev, fimc_is_fault_handler, NULL);
#endif
#endif

	/* set sysfs for debugging */
	sysfs_debug.en_clk_gate = 0;
	sysfs_debug.en_dvfs = 1;
#ifdef ENABLE_CLOCK_GATE
	sysfs_debug.en_clk_gate = 1;
#ifdef HAS_FW_CLOCK_GATE
	sysfs_debug.clk_gate_mode = CLOCK_GATE_MODE_FW;
#else
	sysfs_debug.clk_gate_mode = CLOCK_GATE_MODE_HOST;
#endif
#endif
	ret = sysfs_create_group(&core->pdev->dev.kobj,
				 &fimc_is_debug_attr_group);

	s = pinctrl_lookup_state(pdata->pinctrl, "release");
	if (pinctrl_select_state(pdata->pinctrl, s) < 0) {
		probe_err("pinctrl_select_state is fail\n");
		goto p_err3;
	}

	probe_info("%s:end\n", __func__);
	return 0;

p_err3:
	iounmap(core->regs);
#if defined(ENABLE_IS_CORE) || defined(USE_MCUCTL)
p_err2:
	release_mem_region(regs_res->start, resource_size(regs_res));
#endif
p_err1:
	kfree(core);
	return ret;
}