/*
 * hal_tui_alloc() - allocate the secure TUI framebuffer pool from ION.
 *
 * Carves @count buffers of @allocsize bytes out of one physically
 * contiguous ION allocation and publishes the base physical addresses via
 * @allocbuffer[0]/@allocbuffer[1] and g_tuiMemPool. One extra @allocsize
 * slot is over-allocated so the pool base can be pushed forward to the
 * next 16M boundary (TUI framebuffer alignment requirement).
 *
 * Side effects: sets the file-scope globals client, handle, dbuf and va;
 * the ION client/handle remain owned by this module (freed on the
 * matching teardown path, not visible here).
 *
 * Returns TUI_DCI_OK on success, TUI_DCI_ERR_INTERNAL_ERROR on failure.
 */
uint32_t hal_tui_alloc(tuiAllocBuffer_t allocbuffer[MAX_DCI_BUFFER_NUMBER],
		       size_t allocsize, uint32_t count)
{
	dma_addr_t buf_addr;
	ion_phys_addr_t phys_addr;
	unsigned long offset = 0;
	unsigned int size;

	/*
	 * NOTE(review): allocsize * (count + 1) can overflow 'unsigned int'
	 * for large values - confirm callers bound allocsize/count.
	 */
	size = allocsize * (count + 1);

	/* Bug fix: the original used every ION result unchecked. */
	client = ion_client_create(ion_exynos, "TUI module");
	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: ion_client_create failed\n", __func__);
		return TUI_DCI_ERR_INTERNAL_ERROR;
	}

	handle = ion_alloc(client, size, 0, EXYNOS_ION_HEAP_EXYNOS_CONTIG_MASK,
			   ION_EXYNOS_VIDEO_MASK);
	if (IS_ERR_OR_NULL(handle)) {
		pr_err("%s: ion_alloc(%u) failed\n", __func__, size);
		return TUI_DCI_ERR_INTERNAL_ERROR;
	}

	dbuf = ion_share_dma_buf(client, handle);
	if (IS_ERR_OR_NULL(dbuf)) {
		pr_err("%s: ion_share_dma_buf failed\n", __func__);
		return TUI_DCI_ERR_INTERNAL_ERROR;
	}

	buf_addr = decon_map_sec_dma_buf(dbuf, 0);

	if (ion_phys(client, handle, (unsigned long *)&phys_addr,
		     &dbuf->size)) {
		pr_err("%s: ion_phys failed\n", __func__);
		return TUI_DCI_ERR_INTERNAL_ERROR;
	}

	/* TUI frame buffer must be aligned to 16M. */
	if (phys_addr % 0x1000000)
		offset = 0x1000000 - (phys_addr % 0x1000000);

	phys_addr = phys_addr + offset;
	va = buf_addr + offset;
	/* NOTE(review): %x assumes a 32-bit va - verify on LPAE/64-bit. */
	printk("buf_addr : %x\n", va);
	printk("phys_addr : %lx\n", phys_addr);

	g_tuiMemPool.pa = phys_addr;
	g_tuiMemPool.size = allocsize * count;

	/*
	 * Bug fix: the original compared allocsize*count against a pool size
	 * it had just set to allocsize*count, so the "too small" branch was
	 * unreachable. Check instead that the aligned pool still fits inside
	 * the underlying ION allocation.
	 */
	if (offset + (unsigned long)allocsize * count > size) {
		pr_debug("%s(%d): %s\n", __func__, __LINE__,
			 "Memory pool too small");
		return TUI_DCI_ERR_INTERNAL_ERROR;
	}

	/* Split the pool in two halves (assumes count == 2 - TODO confirm). */
	allocbuffer[0].pa = (uint64_t)g_tuiMemPool.pa;
	allocbuffer[1].pa = (uint64_t)(g_tuiMemPool.pa + g_tuiMemPool.size / 2);

	return TUI_DCI_OK;
}
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct ion_client *client = filp->private_data; switch (cmd) { case ION_IOC_ALLOC: { struct ion_allocation_data data; if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; data.handle = ion_alloc(client, data.len, data.align, data.flags); if (IS_ERR(data.handle)) return PTR_ERR(data.handle); if (copy_to_user((void __user *)arg, &data, sizeof(data))) { ion_free(client, data.handle); return -EFAULT; } break; } case ION_IOC_FREE: { struct ion_handle_data data; bool valid; if (copy_from_user(&data, (void __user *)arg, sizeof(struct ion_handle_data))) return -EFAULT; mutex_lock(&client->lock); valid = ion_handle_validate(client, data.handle); mutex_unlock(&client->lock); if (!valid) return -EINVAL; ion_free(client, data.handle); break; } case ION_IOC_MAP: case ION_IOC_SHARE: { struct ion_fd_data data; int ret; if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; ret = ion_share_set_flags(client, data.handle, filp->f_flags); if (ret) return ret; data.fd = ion_share_dma_buf(client, data.handle); if (copy_to_user((void __user *)arg, &data, sizeof(data))) return -EFAULT; if (data.fd < 0) return data.fd; break; } case ION_IOC_IMPORT: { struct ion_fd_data data; int ret = 0; if (copy_from_user(&data, (void __user *)arg, sizeof(struct ion_fd_data))) return -EFAULT; data.handle = ion_import_dma_buf(client, data.fd); if (IS_ERR(data.handle)) data.handle = NULL; if (copy_to_user((void __user *)arg, &data, sizeof(struct ion_fd_data))) return -EFAULT; if (ret < 0) return ret; break; } case ION_IOC_CUSTOM: { struct ion_device *dev = client->dev; struct ion_custom_data data; if (!dev->custom_ioctl) return -ENOTTY; if (copy_from_user(&data, (void __user *)arg, sizeof(struct ion_custom_data))) return -EFAULT; return dev->custom_ioctl(client, data.cmd, data.arg); } case ION_IOC_CLEAN_CACHES: case ION_IOC_INV_CACHES: case ION_IOC_CLEAN_INV_CACHES: { struct 
ion_flush_data data; unsigned long start, end; struct ion_handle *handle = NULL; int ret; if (copy_from_user(&data, (void __user *)arg, sizeof(struct ion_flush_data))) return -EFAULT; start = (unsigned long) data.vaddr; end = (unsigned long) data.vaddr + data.length; if (check_vaddr_bounds(start, end)) { pr_err("%s: virtual address %p is out of bounds\n", __func__, data.vaddr); return -EINVAL; } if (!data.handle) { handle = ion_import_dma_buf(client, data.fd); if (IS_ERR(handle)) { pr_info("%s: Could not import handle: %d\n", __func__, (int)handle); return -EINVAL; } } ret = ion_do_cache_op(client, data.handle ? data.handle : handle, data.vaddr, data.offset, data.length, cmd); if (!data.handle) ion_free(client, handle); if (ret < 0) return ret; break; } case ION_IOC_GET_FLAGS: { struct ion_flag_data data; int ret; if (copy_from_user(&data, (void __user *)arg, sizeof(struct ion_flag_data))) return -EFAULT; ret = ion_handle_get_flags(client, data.handle, &data.flags); if (ret < 0) return ret; if (copy_to_user((void __user *)arg, &data, sizeof(struct ion_flag_data))) return -EFAULT; break; } default: return -ENOTTY; } return 0; }
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct ion_client *client = filp->private_data; switch (cmd) { case ION_IOC_ALLOC_NEW: { struct ion_allocation_data_new data; if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; data.handle = ion_alloc(client, data.len, data.align, data.flags | data.heap_mask); if (IS_ERR(data.handle)) return PTR_ERR(data.handle); if (copy_to_user((void __user *)arg, &data, sizeof(data))) { ion_free(client, data.handle); return -EFAULT; } break; } case ION_IOC_ALLOC: { struct ion_allocation_data data; if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; data.handle = ion_alloc(client, data.len, data.align, data.flags); if (IS_ERR(data.handle)) return PTR_ERR(data.handle); if (copy_to_user((void __user *)arg, &data, sizeof(data))) { ion_free(client, data.handle); return -EFAULT; } break; } case ION_IOC_FREE: { struct ion_handle_data data; bool valid; if (copy_from_user(&data, (void __user *)arg, sizeof(struct ion_handle_data))) return -EFAULT; mutex_lock(&client->lock); valid = ion_handle_validate(client, data.handle); mutex_unlock(&client->lock); if (!valid) return -EINVAL; ion_free(client, data.handle); break; } case ION_IOC_MAP: case ION_IOC_SHARE: { struct ion_fd_data data; int ret; if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; ret = ion_share_set_flags(client, data.handle, filp->f_flags); if (ret) return ret; data.fd = ion_share_dma_buf(client, data.handle); if (copy_to_user((void __user *)arg, &data, sizeof(data))) return -EFAULT; if (data.fd < 0) return data.fd; break; } case ION_IOC_IMPORT: { struct ion_fd_data data; int ret = 0; if (copy_from_user(&data, (void __user *)arg, sizeof(struct ion_fd_data))) return -EFAULT; data.handle = ion_import_dma_buf(client, data.fd); if (IS_ERR(data.handle)) data.handle = NULL; if (copy_to_user((void __user *)arg, &data, sizeof(struct ion_fd_data))) return -EFAULT; if (ret < 0) 
return ret; break; } case ION_IOC_CUSTOM: { struct ion_device *dev = client->dev; struct ion_custom_data data; if (!dev->custom_ioctl) return -ENOTTY; if (copy_from_user(&data, (void __user *)arg, sizeof(struct ion_custom_data))) return -EFAULT; return dev->custom_ioctl(client, data.cmd, data.arg); } case ION_IOC_CLEAN_CACHES_OLD: case ION_IOC_CLEAN_CACHES: return client->dev->custom_ioctl(client, ION_IOC_CLEAN_CACHES, arg); case ION_IOC_INV_CACHES_OLD: case ION_IOC_INV_CACHES: return client->dev->custom_ioctl(client, ION_IOC_INV_CACHES, arg); case ION_IOC_CLEAN_INV_CACHES_OLD: case ION_IOC_CLEAN_INV_CACHES: return client->dev->custom_ioctl(client, ION_IOC_CLEAN_INV_CACHES, arg); case ION_IOC_GET_FLAGS_OLD: case ION_IOC_GET_FLAGS: return client->dev->custom_ioctl(client, ION_IOC_GET_FLAGS, arg); case ION_IOC_CLIENT_RENAME: { struct ion_client_name_data data; int len; if (copy_from_user(&data, (void __user *)arg, sizeof(struct ion_client_name_data))) return -EFAULT; if(data.len < 0) return -EFAULT; len = (ION_CLIENT_NAME_LENGTH > data.len) ? data.len: ION_CLIENT_NAME_LENGTH; if (copy_from_user(client->name, (void __user *)data.name, len)) return -EFAULT; client->name[len] = '\0'; break; } default: return -ENOTTY; } return 0; }