/*
 * Forward a meminfo update to the memory backend for the UMP allocation
 * behind @memh. Returns UMP_DD_SUCCESS, or UMP_DD_INVALID when the
 * handle's secure id is not present in the global id map.
 */
UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_meminfo_set(ump_dd_handle memh, void* args)
{
	ump_dd_mem *mem;
	ump_secure_id id;
	ump_dd_status_code status = UMP_DD_SUCCESS;

	DEBUG_ASSERT_POINTER(memh);

	id = ump_dd_secure_id_get(memh);

	/* The id map is shared driver state; hold the map lock across the
	 * lookup and the backend call so the descriptor cannot vanish. */
	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);

	if (0 == ump_descriptor_mapping_get(device.secure_id_map, (int)id, (void**)&mem)) {
		device.backend->set(mem, args);
	} else {
		DBG_MSG(1, ("Failed to look up mapping in ump_meminfo_set(). ID: %u\n", (ump_secure_id)id));
		status = UMP_DD_INVALID;
	}

	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);

	return status;
}
static int _disp_get_ump_secure_id(struct fb_info *info, struct rk_fb_inf *g_fbi, unsigned long arg, int nbuf) { u32 __user *psecureid = (u32 __user *) arg; int buf_len = 1920*1080*4; // int buf_len = info->var.xres * info->var.yres * (info->var.bits_per_pixel >> 3); ump_secure_id secure_id; int layer_id = get_fb_layer_id(&info->fix); //if(nbuf>0) return -ENOTSUPP; //printk("\nUMP: ENTER num_fb:%d num_buf:%d",layer_id,nbuf); if (!(info->var.yres * 2 <= info->var.yres_virtual))//IAM printk("\nUMP: Double-buffering not enabled"); // else if (!g_fbi->ump_wrapped_buffer[layer_id][nbuf]) { ump_dd_physical_block ump_memory_description; ump_memory_description.addr = info->fix.smem_start; ump_memory_description.size = buf_len; if (nbuf > 0) { ump_memory_description.addr += (buf_len * nbuf); // ump_memory_description.size = buf_len; } //printk("\nUMP: nbuf:%d, addr:%X, size:%X\n",nbuf, ump_memory_description.addr,ump_memory_description.size); g_fbi->ump_wrapped_buffer[layer_id][nbuf] = ump_dd_handle_create_from_phys_blocks (&ump_memory_description, 1); } secure_id = ump_dd_secure_id_get(g_fbi-> ump_wrapped_buffer[layer_id][nbuf]); //printk("UMP: secure_id:%X, arg:%X",secure_id,arg); return put_user((unsigned int)secure_id, psecureid); }
/*
 * Wrap the whole framebuffer of @info in a UMP handle (created lazily and
 * cached per fb node / buffer index) and copy its secure id to user space.
 *
 * Returns 0 on success, -ENOMEM if the UMP handle cannot be created,
 * or the put_user() error (-EFAULT) if the user copy fails.
 */
static int _disp_get_ump_secure_id(struct fb_info *info, myfb_dev_t *g_fbi,
				   unsigned long arg, int buf)
{
	u32 __user *psecureid = (u32 __user *) arg;
	ump_secure_id secure_id;

	if (!g_fbi->ump_wrapped_buffer[info->node][buf]) {
		ump_dd_physical_block ump_memory_description;
		ump_dd_handle handle;

		printk("create_disp_get_ump_secure_id******%d\n", buf);

		ump_memory_description.addr = info->fix.smem_start;
		ump_memory_description.size = info->fix.smem_len;

		handle = ump_dd_handle_create_from_phys_blocks(&ump_memory_description, 1);
		/* Creation can fail; do not call ump_dd_secure_id_get() on an
		 * invalid handle. */
		if (UMP_DD_HANDLE_INVALID == handle)
			return -ENOMEM;

		g_fbi->ump_wrapped_buffer[info->node][buf] = handle;
	}

	secure_id = ump_dd_secure_id_get(g_fbi->ump_wrapped_buffer[info->node][buf]);

	return put_user((unsigned int)secure_id, psecureid);
}
int s3cfb_ioctl(struct fb_info *fb, unsigned int cmd, unsigned long arg) { struct fb_var_screeninfo *var = &fb->var; struct s3cfb_window *win = fb->par; struct s3cfb_global *fbdev = get_fimd_global(fb->node); struct s3cfb_lcd *lcd = fbdev->lcd; int ret = 0; union { struct s3cfb_user_window user_window; struct s3cfb_user_plane_alpha user_alpha; struct s3cfb_user_chroma user_chroma; int vsync; } p; switch (cmd) { case FBIO_WAITFORVSYNC: s3cfb_wait_for_vsync(fbdev); break; case S3CFB_WIN_POSITION: if (copy_from_user(&p.user_window, (struct s3cfb_user_window __user *)arg, sizeof(p.user_window))) ret = -EFAULT; else { if (p.user_window.x < 0) p.user_window.x = 0; if (p.user_window.y < 0) p.user_window.y = 0; if (p.user_window.x + var->xres > lcd->width) win->x = lcd->width - var->xres; else win->x = p.user_window.x; if (p.user_window.y + var->yres > lcd->height) win->y = lcd->height - var->yres; else win->y = p.user_window.y; s3cfb_set_window_position(fbdev, win->id); } break; case S3CFB_WIN_SET_PLANE_ALPHA: if (copy_from_user(&p.user_alpha, (struct s3cfb_user_plane_alpha __user *)arg, sizeof(p.user_alpha))) ret = -EFAULT; else { win->alpha.mode = PLANE_BLENDING; win->alpha.channel = p.user_alpha.channel; win->alpha.value = S3CFB_AVALUE(p.user_alpha.red, p.user_alpha.green, p.user_alpha.blue); s3cfb_set_alpha_blending(fbdev, win->id); } break; case S3CFB_WIN_SET_CHROMA: if (copy_from_user(&p.user_chroma, (struct s3cfb_user_chroma __user *)arg, sizeof(p.user_chroma))) ret = -EFAULT; else { win->chroma.enabled = p.user_chroma.enabled; win->chroma.key = S3CFB_CHROMA(p.user_chroma.red, p.user_chroma.green, p.user_chroma.blue); s3cfb_set_chroma_key(fbdev, win->id); } break; case S3CFB_SET_VSYNC_INT: if (get_user(p.vsync, (int __user *)arg)) ret = -EFAULT; else { if (p.vsync) s3cfb_set_global_interrupt(fbdev, 1); s3cfb_set_vsync_interrupt(fbdev, p.vsync); } break; #if MALI_USE_UNIFIED_MEMORY_PROVIDER case S3CFB_GET_FB_UMP_SECURE_ID_0: { u32 __user *psecureid = (u32 __user *) 
arg; ump_secure_id secure_id; dev_info(fbdev->dev, "ump_dd_secure_id_get\n"); secure_id = ump_dd_secure_id_get(ump_wrapped_buffer); dev_info(fbdev->dev, "Saving secure id 0x%x in userptr %p\n" , (unsigned int)secure_id, psecureid); dev_dbg(fbdev->dev, "Saving secure id 0x%x in userptr %p\n" , (unsigned int)secure_id, psecureid); return put_user((unsigned int)secure_id, psecureid); } break; #endif } return ret; }
/*
 * IOCTL operation; Import fd to UMP memory.
 *
 * Translates an ION buffer fd from user space into a UMP allocation:
 * maps the ION buffer, collects its scatter-gather segments into UMP
 * physical blocks, wraps them in a UMP handle and registers that handle
 * with the caller's session, then copies the resulting secure id/size
 * back to user space.
 *
 * Returns 0 on success, -ENOTTY on NULL parameters, -ENOMEM when the
 * block array cannot be allocated, -EFAULT on user-copy or import
 * failures.
 */
int ump_ion_import_wrapper(u32 __user * argument, struct ump_session_data * session_data)
{
	/* Upper bound on scatter-gather segments we accept (was a bare 1024). */
	enum { UMP_ION_MAX_BLOCKS = 1024 };

	_ump_uk_ion_import_s user_interaction;
	ump_dd_handle ump_handle; /* was wrongly declared ump_dd_handle* */
	ump_dd_physical_block *blocks;
	unsigned long num_blocks;
	struct ion_handle *ion_hnd;
	struct scatterlist *sg;
	struct scatterlist *sg_ion;
	unsigned long i = 0;
	ump_session_memory_list_element *session_memory_element = NULL;

	if (ion_client_ump == NULL)
		ion_client_ump = ion_client_create(ion_exynos, -1, "ump");

	/* Sanity check input parameters */
	if (NULL == argument || NULL == session_data) {
		MSG_ERR(("NULL parameter in ump_ioctl_allocate()\n"));
		return -ENOTTY;
	}

	/* Copy the user space memory to kernel space (so we safely can read it) */
	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction))) {
		MSG_ERR(("copy_from_user() in ump_ioctl_allocate()\n"));
		return -EFAULT;
	}

	user_interaction.ctx = (void *) session_data;

	/* translate fd to secure ID */
	ion_hnd = ion_import_fd(ion_client_ump, user_interaction.ion_fd);
	/* NOTE(review): ion_import_fd() reports failure via ERR_PTR(); the fd
	 * comes from user space, so this should be validated with IS_ERR()
	 * before being mapped — confirm against the ION API in this tree. */
	sg_ion = ion_map_dma(ion_client_ump, ion_hnd);

	blocks = (ump_dd_physical_block *)_mali_osk_malloc(sizeof(ump_dd_physical_block) * UMP_ION_MAX_BLOCKS);
	if (NULL == blocks) {
		MSG_ERR(("Failed to allocate blocks in ump_ioctl_allocate()\n"));
		/* Previously leaked the ION mapping and handle on this path. */
		ion_unmap_dma(ion_client_ump, ion_hnd);
		ion_free(ion_client_ump, ion_hnd);
		return -ENOMEM;
	}

	sg = sg_ion;
	do {
		blocks[i].addr = sg_phys(sg);
		blocks[i].size = sg_dma_len(sg);
		i++;
		if (i >= UMP_ION_MAX_BLOCKS) {
			MSG_ERR(("ion_import fail() in ump_ioctl_allocate()\n"));
			goto out_err;
		}
		sg = sg_next(sg);
	} while (sg);
	num_blocks = i;

	/* Initialize the session_memory_element, and add it to the session object */
	session_memory_element = _mali_osk_calloc(1, sizeof(ump_session_memory_list_element));
	if (NULL == session_memory_element) {
		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
		goto out_err;
	}

	ump_handle = ump_dd_handle_create_from_phys_blocks(blocks, num_blocks);
	if (UMP_DD_HANDLE_INVALID == ump_handle) {
		_mali_osk_free(session_memory_element);
		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
		goto out_err;
	}

	session_memory_element->mem = (ump_dd_mem *)ump_handle;
	_mali_osk_mutex_wait(session_data->lock);
	_mali_osk_list_add(&(session_memory_element->list),
			   &(session_data->list_head_session_memory_list));
	_mali_osk_mutex_signal(session_data->lock);

	/* UMP copied the block descriptions; the ION mapping, handle and the
	 * temporary block array are no longer needed. */
	ion_unmap_dma(ion_client_ump, ion_hnd);
	ion_free(ion_client_ump, ion_hnd);
	_mali_osk_free(blocks);

	user_interaction.secure_id = ump_dd_secure_id_get(ump_handle);
	user_interaction.size = ump_dd_size_get(ump_handle);
	user_interaction.ctx = NULL;

	if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction))) {
		/* If the copy fails then we should release the memory. We can use the IOCTL release to accomplish this */
		MSG_ERR(("copy_to_user() failed in ump_ioctl_allocate()\n"));
		return -EFAULT;
	}

	return 0; /* success */

out_err:
	/* Common error cleanup: previously these paths leaked the ION
	 * mapping and handle. */
	_mali_osk_free(blocks);
	ion_unmap_dma(ion_client_ump, ion_hnd);
	ion_free(ion_client_ump, ion_hnd);
	return -EFAULT;
}
static int Fb_ioctl(struct fb_info *info, unsigned int cmd,unsigned long arg) { long ret = 0; unsigned long layer_hdl = 0; #ifdef CONFIG_LYCHEE_FB_UMP_SUN4I u32 __user *psecureid = (u32 __user *) arg; ump_secure_id secure_id; #endif switch (cmd) { case FBIOGET_LAYER_HDL_0: if(g_fbi.fb_mode[info->node] != FB_MODE_SCREEN1) { layer_hdl = g_fbi.layer_hdl[info->node][0]; ret = copy_to_user((void __user *)arg, &layer_hdl, sizeof(unsigned long)); } else { ret = -1; } break; case FBIOGET_LAYER_HDL_1: if(g_fbi.fb_mode[info->node] != FB_MODE_SCREEN0) { layer_hdl = g_fbi.layer_hdl[info->node][1]; ret = copy_to_user((void __user *)arg, &layer_hdl, sizeof(unsigned long)); } else { ret = -1; } break; #if 0 case FBIOGET_VBLANK: { struct fb_vblank vblank; __disp_tcon_timing_t tt; __u32 line = 0; __u32 sel; sel = (g_fbi.fb_mode[info->node] == FB_MODE_SCREEN1)?1:0; line = BSP_disp_get_cur_line(sel); BSP_disp_get_timming(sel, &tt); memset(&vblank, 0, sizeof(struct fb_vblank)); vblank.flags |= FB_VBLANK_HAVE_VBLANK; vblank.flags |= FB_VBLANK_HAVE_VSYNC; if(line <= (tt.ver_total_time-tt.ver_pixels)) { vblank.flags |= FB_VBLANK_VBLANKING; } if((line > tt.ver_front_porch) && (line < (tt.ver_front_porch+tt.ver_sync_time))) { vblank.flags |= FB_VBLANK_VSYNCING; } if (copy_to_user((void __user *)arg, &vblank, sizeof(struct fb_vblank))) ret = -EFAULT; break; } #endif case FBIO_WAITFORVSYNC: { ret = Fb_wait_for_vsync(info); break; } #ifdef CONFIG_LYCHEE_FB_UMP_SUN4I case GET_UMP_SECURE_ID: { secure_id = ump_dd_secure_id_get( ump_wrapped_buffer ); return put_user( (unsigned int)secure_id, psecureid ); break; } #endif default: //__inf("not supported fb io cmd:%x\n", cmd); break; } return ret; }
static int osd_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { struct myfb_dev *fbdev = (struct myfb_dev *)info->par; void __user *argp = (void __user *)arg; u32 src_colorkey;//16 bit or 24 bit u32 srckey_enable; u32 gbl_alpha; u32 osd_order; s32 osd_axis[4] = {0}; u32 block_windows[8] = {0}; u32 block_mode; unsigned long ret; switch (cmd) { case FBIOPUT_OSD_SRCKEY_ENABLE: ret=copy_from_user(&srckey_enable,argp,sizeof(u32)); break; case FBIOPUT_OSD_SRCCOLORKEY: ret=copy_from_user(&src_colorkey,argp,sizeof(u32)); break ; case FBIOPUT_OSD_SET_GBL_ALPHA: ret=copy_from_user(&gbl_alpha,argp,sizeof(u32)); break; case FBIOPUT_OSD_SCALE_AXIS: ret=copy_from_user(&osd_axis, argp, 4 * sizeof(s32)); break; case FBIOGET_OSD_SCALE_AXIS: case FBIOPUT_OSD_ORDER: case FBIOGET_OSD_ORDER: case FBIOGET_OSD_GET_GBL_ALPHA: case FBIOPUT_OSD_2X_SCALE: case FBIOPUT_OSD_ENABLE_3D_MODE: case FBIOPUT_OSD_FREE_SCALE_ENABLE: case FBIOPUT_OSD_FREE_SCALE_WIDTH: case FBIOPUT_OSD_FREE_SCALE_HEIGHT: case FBIOGET_OSD_BLOCK_WINDOWS: case FBIOGET_OSD_BLOCK_MODE: case FBIOGET_OSD_FREE_SCALE_AXIS: break; case FBIOPUT_OSD_BLOCK_MODE: block_mode = (u32)argp; break; case FBIOPUT_OSD_BLOCK_WINDOWS: ret=copy_from_user(&block_windows, argp, 8 * sizeof(u32)); break; case FBIOPUT_OSD_FREE_SCALE_AXIS: ret=copy_from_user(&osd_axis, argp, 4 * sizeof(s32)); break; case GET_UMP_SECURE_ID_BUF1: { u32 __user *psecureid = (u32 __user *) arg; ump_secure_id secure_id; if (!ump_wrapped_buffer[0]) { ump_dd_physical_block ump_memory_description; ump_memory_description.addr = info->fix.smem_start; ump_memory_description.size = info->fix.smem_len; ump_wrapped_buffer[0] = ump_dd_handle_create_from_phys_blocks( &ump_memory_description, 1); } //Get secure id for top-half of framebuffer secure_id = ump_dd_secure_id_get(ump_wrapped_buffer[0]); return put_user((unsigned int)secure_id, psecureid); break; } case GET_UMP_SECURE_ID_BUF2: { u32 __user *psecureid = (u32 __user *) arg; ump_secure_id secure_id; if 
(!ump_wrapped_buffer[1]) { ump_dd_physical_block ump_memory_description; ump_memory_description.addr = info->fix.smem_start; ump_memory_description.size = info->fix.smem_len; ump_wrapped_buffer[1] = ump_dd_handle_create_from_phys_blocks( &ump_memory_description, 1); } //Get secure id for bottom-half of framebuffer secure_id = ump_dd_secure_id_get(ump_wrapped_buffer[1]); return put_user((unsigned int)secure_id, psecureid); break; } default : amlog_mask_level(LOG_MASK_IOCTL,LOG_LEVEL_HIGH,"command not supported\r\n "); return -1; } mutex_lock(&fbdev->lock); switch (cmd) { case FBIOPUT_OSD_ORDER: osddev_change_osd_order(info->node,arg); break; case FBIOGET_OSD_ORDER: osd_order=osddev_get_osd_order(info->node); ret=copy_to_user(argp, &osd_order, sizeof(u32)); break; case FBIOPUT_OSD_FREE_SCALE_WIDTH: osddev_free_scale_width(info->node,arg); break; case FBIOPUT_OSD_FREE_SCALE_HEIGHT: osddev_free_scale_height(info->node,arg); break; case FBIOPUT_OSD_FREE_SCALE_ENABLE: osddev_free_scale_enable(info->node,arg); break; case FBIOPUT_OSD_ENABLE_3D_MODE: osddev_enable_3d_mode(info->node,arg); break; case FBIOPUT_OSD_2X_SCALE: //arg :higher 16 bit h_scale_enable, lower 16 bit v_scale_enable osddev_set_2x_scale(info->node,arg&0xffff0000?1:0,arg&0xffff?1:0); break; case FBIOPUT_OSD_SRCCOLORKEY: switch(fbdev->color->color_index) { case COLOR_INDEX_16_655: case COLOR_INDEX_16_844: case COLOR_INDEX_16_565: case COLOR_INDEX_24_888_B: case COLOR_INDEX_24_RGB: case COLOR_INDEX_YUV_422: amlog_mask_level(LOG_MASK_IOCTL,LOG_LEVEL_LOW,"set osd color key 0x%x\r\n",src_colorkey); fbdev->color_key = src_colorkey; osddev_set_colorkey(info->node,fbdev->color->color_index,src_colorkey); break; default: break; } break ; case FBIOPUT_OSD_SRCKEY_ENABLE: switch(fbdev->color->color_index) { case COLOR_INDEX_16_655: case COLOR_INDEX_16_844: case COLOR_INDEX_16_565: case COLOR_INDEX_24_888_B: case COLOR_INDEX_24_RGB: case COLOR_INDEX_YUV_422: amlog_mask_level(LOG_MASK_IOCTL,LOG_LEVEL_LOW,"set osd color 
key %s\r\n",srckey_enable?"enable":"disable"); if (srckey_enable != 0) { fbdev->enable_key_flag |= KEYCOLOR_FLAG_TARGET; if (!(fbdev->enable_key_flag & KEYCOLOR_FLAG_ONHOLD)) { osddev_srckey_enable(info->node, 1); fbdev->enable_key_flag |= KEYCOLOR_FLAG_CURRENT; } } else { osddev_srckey_enable(info->node, 0); fbdev->enable_key_flag &= ~(KEYCOLOR_FLAG_TARGET | KEYCOLOR_FLAG_CURRENT); } break; default:break; } break; case FBIOPUT_OSD_SET_GBL_ALPHA: osddev_set_gbl_alpha(info->node,gbl_alpha); break; case FBIOGET_OSD_GET_GBL_ALPHA: gbl_alpha=osddev_get_gbl_alpha(info->node); ret=copy_to_user(argp, &gbl_alpha, sizeof(u32)); break; case FBIOGET_OSD_SCALE_AXIS: osddev_get_scale_axis(info->node, &osd_axis[0], &osd_axis[1], &osd_axis[2], &osd_axis[3]); ret=copy_to_user(argp, &osd_axis, 4 * sizeof(s32)); break; case FBIOPUT_OSD_SCALE_AXIS: osddev_set_scale_axis(info->node, osd_axis[0], osd_axis[1], osd_axis[2], osd_axis[3]); break; case FBIOGET_OSD_BLOCK_WINDOWS: osddev_get_block_windows(info->node, block_windows); ret=copy_to_user(argp, &block_windows, 8 * sizeof(u32)); break; case FBIOPUT_OSD_BLOCK_WINDOWS: osddev_set_block_windows(info->node, block_windows); break; case FBIOPUT_OSD_BLOCK_MODE: osddev_set_block_mode(info->node, block_mode); break; case FBIOGET_OSD_BLOCK_MODE: osddev_get_block_mode(info->node, &block_mode); ret=copy_to_user(argp, &block_mode, sizeof(u32)); break; case FBIOGET_OSD_FREE_SCALE_AXIS: osddev_get_free_scale_axis(info->node, &osd_axis[0], &osd_axis[1], &osd_axis[2], &osd_axis[3]); ret=copy_to_user(argp, &osd_axis, 4 * sizeof(s32)); break; case FBIOPUT_OSD_FREE_SCALE_AXIS: osddev_set_free_scale_axis(info->node, osd_axis[0], osd_axis[1], osd_axis[2], osd_axis[3]); break; default: break; } mutex_unlock(&fbdev->lock); return 0; }