/*
 * ion_drv_get_kernel_handle - resolve a caller-supplied ION handle into a
 * validated kernel struct ion_handle pointer.
 *
 * @client:      ION client that owns the handle.
 * @handle:      user handle id (when @from_kernel is 0) or an existing
 *               struct ion_handle pointer (when @from_kernel is nonzero).
 * @from_kernel: nonzero when the caller is in-kernel and passed a real
 *               struct ion_handle pointer.
 *
 * Returns the kernel handle on success, or ERR_PTR(-EINVAL) when the user
 * handle cannot be looked up or the kernel handle fails validation.
 */
struct ion_handle *ion_drv_get_kernel_handle(struct ion_client *client,
					     void *handle, int from_kernel)
{
	struct ion_handle *kernel_handle = handle;

	if (!from_kernel) {
		/* user-space handle id: translate to the kernel handle */
		kernel_handle = ion_uhandle_get(client, handle);
		if (!kernel_handle) {
			/*
			 * fix: original format string had no %s for
			 * __FUNCTION__ and printed a pointer with %d
			 */
			IONMSG("%s: handle invalid, handle=%p\n",
			       __func__, handle);
			return ERR_PTR(-EINVAL);
		}
	} else {
		/* kernel handle: confirm it really belongs to this client */
		if (!ion_handle_validate(client, handle)) {
			/* fix: pointer was printed with %x and the %s for
			 * __FUNCTION__ was missing */
			IONMSG("%s: handle invalid, handle=%p\n",
			       __func__, handle);
			return ERR_PTR(-EINVAL);
		}
	}

	return kernel_handle;
}
long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { long ret; if (!filp->f_op || !filp->f_op->unlocked_ioctl) { IONMSG("compat_ion_ioctl file has no f_op or no f_op->unlocked_ioctl.\n"); return -ENOTTY; } switch (cmd) { case COMPAT_ION_IOC_ALLOC: { struct compat_ion_allocation_data __user *data32; struct ion_allocation_data __user *data; int err; data32 = compat_ptr(arg); data = compat_alloc_user_space(sizeof(*data)); if (data == NULL) { IONMSG("COMPAT_ION_IOC_ALLOC alloc user space fail!.\n"); return -EFAULT; } err = compat_get_ion_allocation_data(data32, data); if (err) { IONMSG("COMPAT_ION_IOC_ALLOC get ion allocation data fail!. err = %d.\n", err); return err; } ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC, (unsigned long)data); if (ret) IONMSG("COMPAT_ION_IOC_ALLOC unlocked_ioctl fail! ret = %ld.\n", ret); err = compat_put_ion_allocation_data(data32, data); if (err) IONMSG("COMPAT_ION_IOC_ALLOC put ion allocation data fail! err = %d.\n", err); return ret ? ret : err; } case COMPAT_ION_IOC_FREE: { struct compat_ion_handle_data __user *data32; struct ion_handle_data __user *data; int err; data32 = compat_ptr(arg); data = compat_alloc_user_space(sizeof(*data)); if (data == NULL) { IONMSG("COMPAT_ION_IOC_FREE alloc user space fail!.\n"); return -EFAULT; } err = compat_get_ion_handle_data(data32, data); if (err) { IONMSG("COMPAT_ION_IOC_FREE get ion handle data fail!. err = %d.\n", err); return err; } ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE, (unsigned long)data); if (ret) IONMSG("COMPAT_ION_IOC_FREE unlocked_ioctl fail! 
ret = %ld.\n", ret); return ret; } case COMPAT_ION_IOC_CUSTOM: { struct compat_ion_custom_data __user *data32; struct ion_custom_data __user *data; int err; data32 = compat_ptr(arg); data = compat_alloc_user_space(sizeof(*data)); if (data == NULL) { IONMSG("COMPAT_ION_IOC_CUSTOM alloc user space fail!.\n"); return -EFAULT; } err = compat_get_ion_custom_data(data32, data); if (err) { IONMSG("COMPAT_ION_IOC_CUSTOM get ion custom data fail!. err = %d.\n", err); return err; } ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM, (unsigned long)data); if (ret) IONMSG("COMPAT_ION_IOC_CUSTOM unlocked_ioctl fail! ret = %ld.\n", ret); err = compat_put_ion_custom_data(data32, data); if (err) IONMSG("COMPAT_ION_IOC_CUSTOM put ion custom data fail! err = %d.\n", err); return ret ? ret : err; } case ION_IOC_SHARE: case ION_IOC_MAP: case ION_IOC_IMPORT: case ION_IOC_SYNC: return filp->f_op->unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); default: { IONMSG("compat_ion_ioctl : No such command!! 0x%x\n", cmd); return -ENOIOCTLCMD; } } }
static long ion_sys_cache_sync(struct ion_client *client, ion_sys_cache_sync_param_t* pParam, int from_kernel) { ION_FUNC_ENTER; if (pParam->sync_type < ION_CACHE_CLEAN_ALL) { // By range operation unsigned int start; size_t size; unsigned int end, page_num, page_start; struct ion_handle *kernel_handle; kernel_handle = ion_drv_get_kernel_handle(client, pParam->handle, from_kernel); if(IS_ERR(kernel_handle)) { IONMSG("ion cache sync fail! \n"); return -EINVAL; } #ifdef __ION_CACHE_SYNC_USER_VA_EN__ if(pParam->sync_type < ION_CACHE_CLEAN_BY_RANGE_USE_VA) #else if(1) #endif { start = (unsigned int) ion_map_kernel(client, kernel_handle); if(IS_ERR_OR_NULL((void*)start)) { IONMSG("cannot do cachesync, unable to map_kernel: ret=%d\n", start); return -EFAULT; } size = ion_handle_buffer(kernel_handle)->size; } else { start = pParam->va; size = pParam->size; } // Cache line align end = start + size; start = (start / L1_CACHE_BYTES * L1_CACHE_BYTES); size = (end - start + L1_CACHE_BYTES - 1) / L1_CACHE_BYTES * L1_CACHE_BYTES; page_num = ((start&(~PAGE_MASK))+size+(~PAGE_MASK))>>PAGE_ORDER; page_start = start & PAGE_MASK; // L1 cache sync if((pParam->sync_type==ION_CACHE_CLEAN_BY_RANGE) || (pParam->sync_type==ION_CACHE_CLEAN_BY_RANGE_USE_VA)) { MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_CLEAN_RANGE], MMProfileFlagStart, size, 0); //printk("[ion_sys_cache_sync]: ION cache clean by range. start=0x%08X size=0x%08X\n", start, size); dmac_map_area((void*)start, size, DMA_TO_DEVICE); } else if ((pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)||(pParam->sync_type == ION_CACHE_INVALID_BY_RANGE_USE_VA)) { MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_INVALID_RANGE], MMProfileFlagStart, size, 0); //printk("[ion_sys_cache_sync]: ION cache invalid by range. 
start=0x%08X size=0x%08X\n", start, size); dmac_unmap_area((void*)start, size, DMA_FROM_DEVICE); } else if ((pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)||(pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE_USE_VA)) { MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_FLUSH_RANGE], MMProfileFlagStart, size, 0); //printk("[ion_sys_cache_sync]: ION cache flush by range. start=0x%08X size=0x%08X\n", start, size); dmac_flush_range((void*)start, (void*)(start+size-1)); } #if 0 // L2 cache sync //printk("[ion_sys_cache_sync]: page_start=0x%08X, page_num=%d\n", page_start, page_num); for (i=0; i<page_num; i++, page_start+=DEFAULT_PAGE_SIZE) { phys_addr_t phys_addr; if (page_start>=VMALLOC_START && page_start<=VMALLOC_END) { ppage = vmalloc_to_page((void*)page_start); if (!ppage) { printk("[ion_sys_cache_sync]: Cannot get vmalloc page. addr=0x%08X\n", page_start); ion_unmap_kernel(client, pParam->handle); return -EFAULT; } phys_addr = page_to_phys(ppage); } else phys_addr = virt_to_phys((void*)page_start); if (pParam->sync_type == ION_CACHE_CLEAN_BY_RANGE) outer_clean_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE); else if (pParam->sync_type == ION_CACHE_INVALID_BY_RANGE) outer_inv_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE); else if (pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE) outer_flush_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE); } #endif #ifdef __ION_CACHE_SYNC_USER_VA_EN__ if(pParam->sync_type < ION_CACHE_CLEAN_BY_RANGE_USE_VA) #endif { ion_unmap_kernel(client, kernel_handle); } if (pParam->sync_type == ION_CACHE_CLEAN_BY_RANGE) MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_CLEAN_RANGE], MMProfileFlagEnd, size, 0); else if (pParam->sync_type == ION_CACHE_INVALID_BY_RANGE) MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_INVALID_RANGE], MMProfileFlagEnd, size, 0); else if (pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE) MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_FLUSH_RANGE], MMProfileFlagEnd, size, 0); }