int isp_process_mem_data(struct isp_mem_data *data) { int i; int ret = -1; struct isp_mem_data *ppreview_user = \ (struct isp_mem_data *)data; struct isp_mem_data preview_param; u32 input_buffer_size, output_buffer_size; u32 input_nr_pages, output_nr_pages; struct page **input_pages = NULL; struct page **output_pages = NULL; unsigned long isp_addr_in = 0; unsigned long isp_addr_out = 0; unsigned long isp_addr_tmp = 0; unsigned long timeout; struct isp_mem_resize_data resizer_param; u16 cropadjust = 0; if (ppreview_user == NULL) { printk(KERN_ERR "ISP_PROC_ERR: Invalid user data!\n"); return -EINVAL; } memcpy(&preview_param, ppreview_user, \ sizeof(struct isp_mem_data)); DPRINTK_ISPPROC("input(%d-%d) - output(%d-%d)\n", preview_param.input_width, preview_param.input_height, preview_param.output_width, preview_param.output_height); DPRINTK_ISPPROC("start(%d-%d) - end(%d-%d)\n", preview_param.left, preview_param.top, preview_param.crop_width, preview_param.crop_height); if (ppreview_user->datain == 0 || ppreview_user->dataout == 0) return -EINVAL; isppreview_enable(0); ispresizer_enable(0); timeout = jiffies + msecs_to_jiffies(200); while (isppreview_busy() || ispresizer_busy()) { if (time_after(jiffies, timeout)) return -EINVAL; msleep(1); } isppreview_save_context(); ispresizer_save_context(); isppreview_free(); ispresizer_free(); isppreview_request(); ispresizer_request(); /* set data path before configuring modules. 
*/ isppreview_update_datapath(PRV_RAW_MEM, PREVIEW_MEM); ispresizer_config_datapath(RSZ_MEM_YUV, 0); ret = isppreview_try_size(preview_param.input_width, preview_param.input_height, &preview_param.output_width, &preview_param.output_height); if (ret < 0) goto exit_cleanup; ret = isppreview_config_size(preview_param.input_width, preview_param.input_height, preview_param.output_width, preview_param.output_height); if (ret < 0) goto exit_cleanup; input_buffer_size = ALIGN_TO(ppreview_user->input_width* \ ppreview_user->input_height*2 , 0x100); input_pages = map_user_memory_to_kernel(preview_param.datain, input_buffer_size, &input_nr_pages); if (input_pages == NULL) { ret = -EINVAL; printk(KERN_ERR "ISP_PROC_ERR: memory allocation failed\n"); goto exit_cleanup; } output_buffer_size = ALIGN_TO(ppreview_user->output_width* \ ppreview_user->output_height*2, 0x1000); output_pages = map_user_memory_to_kernel(preview_param.dataout, output_buffer_size, &output_nr_pages); if (output_pages == NULL) { ret = -EINVAL; printk(KERN_ERR "ISP_PROC_ERR: memory allocation failed\n"); goto exit_cleanup; } for (i = 0; i < output_nr_pages; ++i) flush_dcache_page(output_pages[i]); isp_addr_in = ispmmu_vmap_pages(input_pages, input_nr_pages); if (IS_ERR((void *)isp_addr_in)) { isp_addr_in = 0; ret = -EINVAL; printk(KERN_ERR "ISP_PROC_ERR: isp mmu map failed\n"); goto exit_cleanup; } isp_addr_out = ispmmu_vmap_pages(output_pages, output_nr_pages); if (IS_ERR((void *)isp_addr_out)) { isp_addr_out = 0; ret = -EINVAL; printk(KERN_ERR "ISP_PROC_ERR: isp mmu map failed\n"); goto exit_cleanup; } /* This buffer must be allocated and mapped to the ISP MMU previously. 
*/ isp_addr_tmp = isp_tmp_buf_addr(); if (isp_addr_tmp == 0) { printk(KERN_ERR "ISP_PROC_ERR: Invalid isp tmp buffer address!\n"); goto exit_cleanup; } isppreview_config_inlineoffset(ppreview_user->input_width * 2); isppreview_set_inaddr(isp_addr_in); isppreview_set_outaddr(isp_addr_tmp); resizer_param.input_width = preview_param.output_width; resizer_param.input_height = preview_param.output_height; resizer_param.output_width = ppreview_user->output_width; resizer_param.output_height = ppreview_user->output_height; if ((preview_param.left == 0) && (preview_param.top == 0)) { ret = ispresizer_try_size(&resizer_param.input_width, &resizer_param.input_height, &resizer_param.output_width, &resizer_param.output_height); if (ret < 0) goto exit_cleanup; ret = ispresizer_config_size(resizer_param.input_width, resizer_param.input_height, resizer_param.output_width, resizer_param.output_height); if (ret < 0) goto exit_cleanup; ispresizer_set_inaddr(isp_addr_tmp); } else { ispresizer_trycrop(preview_param.left, preview_param.top, preview_param.crop_width, preview_param.crop_height, resizer_param.output_width, resizer_param.output_height); ispresizer_applycrop(); /* account for pixel loss when using crop*/ if ((preview_param.input_height > preview_param.output_height) && (preview_param.top > 16)) cropadjust = 8; else cropadjust = 0; /* pixel alignment in 32bit space, vertical must be 0 per TRM */ isp_reg_writel(((preview_param.left%16) << ISPRSZ_IN_START_HORZ_ST_SHIFT) | (0 << ISPRSZ_IN_START_VERT_ST_SHIFT), OMAP3_ISP_IOMEM_RESZ, ISPRSZ_IN_START); /* Align input address for cropping, per TRM */ ispresizer_set_inaddr(isp_addr_tmp - (resizer_param.input_width*2*cropadjust) + (preview_param.top*resizer_param.input_width*2) + ((preview_param.left/16)*32)); } ispresizer_set_outaddr(isp_addr_out); ispresizer_config_inlineoffset( ALIGN_TO(resizer_param.input_width*2, 32)); if (isp_set_callback(CBK_PREV_DONE, prv_isr, (void *) NULL, (void *)NULL) != 0) { printk(KERN_ERR 
"ISP_PROC_ERR: Error setting PRV callback.\n"); goto exit_cleanup; } if (isp_set_callback(CBK_RESZ_DONE, rsz_isr, (void *) NULL, (void *)NULL) != 0) { printk(KERN_ERR "ISP_PROC_ERR: Error setting RSZ callback.\n"); goto exit_cleanup; } isp_reg_writel(0xFFFFFFFF, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS); isp_wfc.done = 0; /* start preview engine. */ isppreview_enable(1); ret = wait_for_completion_timeout(&isp_wfc, msecs_to_jiffies(1000)); if (!ret) { isppreview_enable(0); ispresizer_enable(0); } timeout = jiffies + msecs_to_jiffies(50); while (ispresizer_busy()) { msleep(5); if (time_after(jiffies, timeout)) { printk(KERN_ERR "ISP_RESZ_ERR: Resizer still busy"); break; } } isp_reg_writel(0xFFFFFFFF, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS); isp_unset_callback(CBK_PREV_DONE); isp_unset_callback(CBK_RESZ_DONE); exit_cleanup: isppreview_restore_context(); ispresizer_restore_context(); if (isp_addr_in != 0) ispmmu_vunmap(isp_addr_in); if (isp_addr_out != 0) ispmmu_vunmap(isp_addr_out); if (input_pages != NULL) { unmap_user_memory_from_kernel(input_pages, input_nr_pages); kfree(input_pages); } if (output_pages != NULL) { unmap_user_memory_from_kernel(output_pages, output_nr_pages); kfree(output_pages); } DPRINTK_ISPPROC("exit.\n"); return ret; }
/*
 * mono_arch_get_gsharedvt_call_info:
 *
 *   Compute calling convention information for marshalling a call between NORMAL_SIG and GSHAREDVT_SIG.
 * If GSHAREDVT_IN is TRUE, then the caller calls using the signature NORMAL_SIG but the call is received by
 * a method with signature GSHAREDVT_SIG, otherwise its the other way around.
 *
 * Returns a GSharedVtCallInfo allocated from the current domain, holding the
 * trampoline address, stack usage, a caller-slot -> callee-slot map, and the
 * return-value marshalling kind.  Ownership of the two CallInfo structures
 * obtained here is not transferred; they are freed before returning.
 */
gpointer
mono_arch_get_gsharedvt_call_info (gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, gboolean gsharedvt_in, gint32 vcall_offset, gboolean calli)
{
	GSharedVtCallInfo *info;
	CallInfo *caller_cinfo, *callee_cinfo;
	MonoMethodSignature *caller_sig, *callee_sig;
	int i, j;
	gboolean var_ret = FALSE;
	CallInfo *cinfo, *gcinfo;
	MonoMethodSignature *sig, *gsig;
	GPtrArray *map;

	if (gsharedvt_in) {
		caller_sig = normal_sig;
		callee_sig = gsharedvt_sig;
		caller_cinfo = mono_arch_get_call_info (NULL, caller_sig);
		callee_cinfo = mono_arch_get_call_info (NULL, callee_sig);
	} else {
		callee_sig = normal_sig;
		callee_cinfo = mono_arch_get_call_info (NULL, callee_sig);
		caller_sig = gsharedvt_sig;
		caller_cinfo = mono_arch_get_call_info (NULL, caller_sig);
	}

	/*
	 * If GSHAREDVT_IN is true, this means we are transitioning from normal to gsharedvt code. The caller uses the
	 * normal call signature, while the callee uses the gsharedvt signature.
	 * If GSHAREDVT_IN is false, its the other way around.
	 */

	/* sig/cinfo describes the normal call, while gsig/gcinfo describes the gsharedvt call */
	if (gsharedvt_in) {
		sig = caller_sig;
		gsig = callee_sig;
		cinfo = caller_cinfo;
		gcinfo = callee_cinfo;
	} else {
		sig = callee_sig;
		gsig = caller_sig;
		cinfo = callee_cinfo;
		gcinfo = caller_cinfo;
	}

	if (gcinfo->vtype_retaddr && gsig->ret && mini_is_gsharedvt_type (gsig->ret)) {
		/*
		 * The return type is gsharedvt
		 */
		var_ret = TRUE;
	}

	/*
	 * The stack looks like this:
	 * <arguments>
	 * <ret addr>
	 * <saved ebp>
	 * <call area>
	 * We have to map the stack slots in <arguments> to the stack slots in <call area>.
	 */
	/* map holds pairs of slot indices: (source slot, destination slot). */
	map = g_ptr_array_new ();

	if (cinfo->vtype_retaddr) {
		/*
		 * Map ret arg.
		 * This handles the case when the method returns a normal vtype, and when it returns a type arg, and its instantiated
		 * with a vtype.
		 */
		g_ptr_array_add (map, GUINT_TO_POINTER (caller_cinfo->vret_arg_offset / sizeof (gpointer)));
		g_ptr_array_add (map, GUINT_TO_POINTER (callee_cinfo->vret_arg_offset / sizeof (gpointer)));
	}

	for (i = 0; i < cinfo->nargs; ++i) {
		ArgInfo *ainfo = &caller_cinfo->args [i];
		ArgInfo *ainfo2 = &callee_cinfo->args [i];
		int nslots;

		switch (ainfo->storage) {
		case ArgGSharedVt:
			if (ainfo2->storage == ArgOnStack) {
				nslots = callee_cinfo->args [i].nslots;
				if (!nslots)
					nslots = 1;
				/*
				 * NOTE(review): the high bits of the source slot appear to
				 * encode the marshal kind (1 << 16) plus the slot count
				 * (nslots << 18) for the trampoline — confirm against the
				 * matching gsharedvt trampoline decoder.
				 */
				g_ptr_array_add (map, GUINT_TO_POINTER ((ainfo->offset / sizeof (gpointer)) + (1 << 16) + (nslots << 18)));
				g_ptr_array_add (map, GUINT_TO_POINTER ((ainfo2->offset / sizeof (gpointer))));
			} else {
				/* gsharedvt -> gsharedvt: plain slot copy. */
				g_ptr_array_add (map, GUINT_TO_POINTER ((ainfo->offset / sizeof (gpointer))));
				g_ptr_array_add (map, GUINT_TO_POINTER ((ainfo2->offset / sizeof (gpointer))));
			}
			break;
		default:
			if (ainfo2->storage == ArgOnStack) {
				/* Copy each stack slot of the argument individually. */
				nslots = cinfo->args [i].nslots;
				if (!nslots)
					nslots = 1;
				for (j = 0; j < nslots; ++j) {
					g_ptr_array_add (map, GUINT_TO_POINTER ((ainfo->offset / sizeof (gpointer)) + j));
					g_ptr_array_add (map, GUINT_TO_POINTER ((ainfo2->offset / sizeof (gpointer)) + j));
				}
			} else {
				g_assert (ainfo2->storage == ArgGSharedVt);
				/* NOTE(review): (2 << 16) marks the by-ref direction — confirm with decoder. */
				g_ptr_array_add (map, GUINT_TO_POINTER ((ainfo->offset / sizeof (gpointer)) + (2 << 16)));
				g_ptr_array_add (map, GUINT_TO_POINTER ((ainfo2->offset / sizeof (gpointer))));
			}
			break;
		}
	}

	/* Flatten the pair list into the trailing flexible map of the info struct. */
	info = mono_domain_alloc0 (mono_domain_get (), sizeof (GSharedVtCallInfo) + (map->len * sizeof (int)));
	info->addr = addr;
	info->stack_usage = callee_cinfo->stack_usage;
	info->ret_marshal = GSHAREDVT_RET_NONE;
	info->gsharedvt_in = gsharedvt_in ? 1 : 0;
	info->vret_slot = -1;
	info->calli = calli ? 1 : 0;
	if (var_ret)
		info->vret_arg_slot = gcinfo->vret_arg_offset / sizeof (gpointer);
	else
		info->vret_arg_slot = -1;
	info->vcall_offset = vcall_offset;
	info->map_count = map->len / 2;
	for (i = 0; i < map->len; ++i)
		info->map [i] = GPOINTER_TO_UINT (g_ptr_array_index (map, i));
	g_ptr_array_free (map, TRUE);

	/* Compute return value marshalling */
	if (var_ret) {
		switch (cinfo->ret.storage) {
		case ArgInIReg:
			/* Small integer returns need sign/zero extension when entering gsharedvt code. */
			if (gsharedvt_in && !sig->ret->byref && sig->ret->type == MONO_TYPE_I1)
				info->ret_marshal = GSHAREDVT_RET_I1;
			else if (gsharedvt_in && !sig->ret->byref && (sig->ret->type == MONO_TYPE_U1 || sig->ret->type == MONO_TYPE_BOOLEAN))
				info->ret_marshal = GSHAREDVT_RET_U1;
			else if (gsharedvt_in && !sig->ret->byref && sig->ret->type == MONO_TYPE_I2)
				info->ret_marshal = GSHAREDVT_RET_I2;
			else if (gsharedvt_in && !sig->ret->byref && (sig->ret->type == MONO_TYPE_U2 || sig->ret->type == MONO_TYPE_CHAR))
				info->ret_marshal = GSHAREDVT_RET_U2;
			else if (cinfo->ret.is_pair)
				info->ret_marshal = GSHAREDVT_RET_IREGS;
			else
				info->ret_marshal = GSHAREDVT_RET_IREG;
			break;
		case ArgOnDoubleFpStack:
			info->ret_marshal = GSHAREDVT_RET_DOUBLE_FPSTACK;
			break;
		case ArgOnFloatFpStack:
			info->ret_marshal = GSHAREDVT_RET_FLOAT_FPSTACK;
			break;
		case ArgOnStack:
			/* The caller passes in a vtype ret arg as well */
			g_assert (gcinfo->vtype_retaddr);
			/* Just have to pop the arg, as done by normal methods in their epilog */
			info->ret_marshal = GSHAREDVT_RET_STACK_POP;
			break;
		default:
			g_assert_not_reached ();
		}
	} else if (gsharedvt_in && cinfo->vtype_retaddr) {
		info->ret_marshal = GSHAREDVT_RET_STACK_POP;
	}

	if (gsharedvt_in && var_ret && !caller_cinfo->vtype_retaddr) {
		/* Allocate stack space for the return value */
		info->vret_slot = info->stack_usage / sizeof (gpointer);
		// FIXME:
		info->stack_usage += sizeof (gpointer) * 3;
	}

	info->stack_usage = ALIGN_TO (info->stack_usage, MONO_ARCH_FRAME_ALIGNMENT);

	g_free (caller_cinfo);
	g_free (callee_cinfo);

	return info;
}
/*
 * mt_disp_fill_rect - fill a rectangle of the framebuffer with a solid
 * RGB565 color, honoring the compile-time panel rotation.
 *
 * @left/@top/@right/@bottom: rectangle bounds in un-rotated coordinates
 *                            (right/bottom exclusive).
 * @color: color value; only the low 16 bits (RGB565) are used.
 *
 * The framebuffer line stride is CFG_DISPLAY_WIDTH aligned up to 32 pixels.
 * For 90/180/270 panel rotation the coordinates are remapped per-pixel.
 */
void mt_disp_fill_rect(UINT32 left, UINT32 top, UINT32 right, UINT32 bottom, UINT32 color)
{
    void * fb_addr = mt_get_fb_addr();
    const UINT32 WIDTH = ALIGN_TO(CFG_DISPLAY_WIDTH, 32);   /* stride in pixels */
    const UINT32 HEIGHT = CFG_DISPLAY_HEIGHT;
    const UINT16 COLOR = (UINT16)color;                     /* truncate to RGB565 */
    UINT16 *pLine;
    INT32 x, y;

    /* Start of the first destination line (un-rotated case). */
    pLine = (UINT16 *)fb_addr + top * WIDTH + left;
#if 1
    if(0 == strncmp(MTK_LCM_PHYSICAL_ROTATION, "270", 3))
    {
        /* 270°: walk the rect and remap each pixel; `l` is the rotated row. */
        /* NOTE(review): arithmetic on void* fb_addr is a GNU extension (byte offsets). */
        unsigned int l;
        UINT16 *d = fb_addr;
        for (x=top; x<bottom; x++){
            for (y=left, l= HEIGHT - left; y<right; y++, l--)
            {
                d = fb_addr + ((WIDTH * l + x) << 1);   /* <<1: 2 bytes/pixel */
                *d = COLOR;
            }
        }
    }
    else if(0 == strncmp(MTK_LCM_PHYSICAL_ROTATION, "90", 2))
    {
        /* 90°: mirror of the 270° case; the +1 start offset matches the
         * panel's pixel origin — assumed intentional, TODO confirm. */
        unsigned int l;
        UINT16 *d = fb_addr;
        for (x=WIDTH - top + 1; x > WIDTH - bottom; x--){
            for (y=left, l=left; y<right; y++, l++)
            {
                d = fb_addr + ((WIDTH * l + x) << 1);
                *d = COLOR;
            }
        }
    }
    else if(0 == strncmp(MTK_LCM_PHYSICAL_ROTATION, "180", 3))
    {
        /* 180°: same scan order, but the rect is reflected through the
         * screen center, so only the start pointer changes. */
        unsigned int height = (bottom - top);
        unsigned int width = (right - left);
        UINT16 *d = (UINT16 *)fb_addr + (HEIGHT - bottom) * WIDTH + (WIDTH - right);
        //UINT16 *pLine2 = (UINT16*)addr;
        for (y = 0; y < height; ++ y)
        {
            for (x = 0; x < width; ++ x)
            {
                *(d+x) = COLOR;
            }
            d += WIDTH;
        }
    }
    else
#endif
    {
        /* No rotation: simple row-by-row fill. */
        for (y = top; y < bottom; ++ y)
        {
            UINT16 *pPixel = pLine;
            for (x = left; x < right; ++ x)
            {
                *pPixel++ = COLOR;
            }
            pLine += WIDTH;
        }
    }
}
/*
 * wdma_config_yuv420 - program the WDMA chroma-plane addresses and pitch
 * for planar/semi-planar YUV420 output.
 *
 * @module:     which WDMA instance (converted to a register offset).
 * @fmt:        output color format; only eYV12/eYV21/eNV12/eNV21 configure
 *              chroma planes, any other format leaves offsets at 0.
 * @dstPitch:   luma line pitch in bytes.
 * @Height:     frame height in lines.
 * @dstAddress: base (luma) address of the destination buffer.
 * @sec:        DISP_SECURE_BUFFER selects the secure (cmdq) register path.
 * @handle:     cmdq handle passed through to the register writers.
 *
 * Returns 0 always.
 */
static int wdma_config_yuv420(DISP_MODULE_ENUM module,
                              DpColorFormat fmt,
                              unsigned int dstPitch,
                              unsigned int Height,
                              unsigned long dstAddress,
                              DISP_BUFFER_TYPE sec,
                              void *handle)
{
	unsigned int idx = wdma_index(module);
	unsigned int idx_offst = idx * DISP_WDMA_INDEX_OFFSET;
	unsigned int u_off = 0;
	unsigned int v_off = 0;
	unsigned int u_stride = 0;
	unsigned int y_size = 0;
	unsigned int u_size = 0;
	unsigned int stride = dstPitch;
	int has_v = 1;   /* three-plane formats need the second chroma address */

	/*
	 * NOTE(review): the original eYV12 and eYV21 branches were byte-for-byte
	 * identical, so they are merged here.  eYV21 would normally swap the
	 * U/V plane order — confirm whether the duplicate branch was a
	 * copy-paste bug or the swap is handled elsewhere.
	 */
	if (fmt == eYV12 || fmt == eYV21) {
		/* Planar 4:2:0 — chroma pitch is half the luma pitch, 16-byte aligned. */
		y_size = stride * Height;
		u_stride = ALIGN_TO(stride / 2, 16);
		u_size = u_stride * Height / 2;
		u_off = y_size;
		v_off = y_size + u_size;
	} else if (fmt == eNV12 || fmt == eNV21) {
		/* Semi-planar 4:2:0 — interleaved UV plane, no separate V address. */
		y_size = stride * Height;
		u_stride = stride / 2;
		u_size = u_stride * Height / 2;
		u_off = y_size;
		has_v = 0;
	}

	if (sec != DISP_SECURE_BUFFER) {
		DISP_REG_SET(handle, idx_offst + DISP_REG_WDMA_DST_ADDR1,
			     dstAddress + u_off);
		if (has_v)
			DISP_REG_SET(handle, idx_offst + DISP_REG_WDMA_DST_ADDR2,
				     dstAddress + v_off);
	} else {
		/* Secure buffers: addresses must be patched via the secure cmdq path. */
		int m4u_port;

		m4u_port = M4U_PORT_DISP_WDMA0;
		cmdqRecWriteSecure(handle,
				   disp_addr_convert(idx_offst + DISP_REG_WDMA_DST_ADDR1),
				   CMDQ_SAM_H_2_MVA, dstAddress, u_off, u_size, m4u_port);
		if (has_v)
			cmdqRecWriteSecure(handle,
					   disp_addr_convert(idx_offst + DISP_REG_WDMA_DST_ADDR2),
					   CMDQ_SAM_H_2_MVA, dstAddress, v_off, u_size, m4u_port);
	}
	DISP_REG_SET_FIELD(handle, DST_W_IN_BYTE_FLD_DST_W_IN_BYTE,
			   idx_offst + DISP_REG_WDMA_DST_UV_PITCH, u_stride);

	return 0;
}
/*
 * nano_pvalloc - page-aligned allocation: rounds the requested size up to a
 * whole number of pages and delegates to nano_valloc().
 */
void *
nano_pvalloc(RARG size_t s)
{
    size_t rounded = ALIGN_TO(s, MALLOC_PAGE_ALIGN);

    return nano_valloc(RCALL rounded);
}
// ---------------------------------------------------------------------------
//  DBI Display Driver Public Functions
// ---------------------------------------------------------------------------

/*
 * dsi_config_ddp - configure the display data path that feeds the DSI
 * interface, attach the framebuffer as the top OVL layer, and set up the
 * OVL M4U port.
 *
 * @fbPA: physical address of the RGB565 framebuffer.
 *
 * In video (non-CMD) mode the path reconfiguration is done under the display
 * mutex with local IRQs disabled, and DSI is restarted afterwards.
 * Returns DISP_STATUS_OK.
 */
static DISP_STATUS dsi_config_ddp(UINT32 fbPA)
{
    unsigned long irq_mask;
    struct disp_path_config_struct config = {0};

    /* Decouple mode feeds the path from RDMA0 instead of the OVL engine. */
    if (DISP_IsDecoupleMode()) {
        config.srcModule = DISP_MODULE_RDMA0;
    } else {
        config.srcModule = DISP_MODULE_OVL;
    }

    /* Background and source ROI cover the whole panel. */
    config.bgROI.x = 0;
    config.bgROI.y = 0;
    config.bgROI.width = lcm_params->width;
    config.bgROI.height = lcm_params->height;
    config.bgColor = 0x0;  // background color
    config.pitch = lcm_params->width*2;   /* 2 bytes/pixel (RGB565) */

    config.srcROI.x = 0;
    config.srcROI.y = 0;
    config.srcROI.height= lcm_params->height;
    config.srcROI.width= lcm_params->width;

    /* Framebuffer goes into the top-most OVL layer. */
    config.ovl_config.source = OVL_LAYER_SOURCE_MEM;
    config.ovl_config.layer = DDP_OVL_LAYER_MUN-1;
    config.ovl_config.layer_en = 1;
    config.ovl_config.fmt = eRGB565;
    config.ovl_config.addr = fbPA;
    config.ovl_config.source = OVL_LAYER_SOURCE_MEM;
    config.ovl_config.src_x = 0;
    config.ovl_config.src_y = 0;
    config.ovl_config.dst_x = 0;   // ROI
    config.ovl_config.dst_y = 0;
    config.ovl_config.dst_w = lcm_params->width;
    config.ovl_config.dst_h = lcm_params->height;
    config.ovl_config.src_pitch = ALIGN_TO(lcm_params->width, MTK_FB_ALIGNMENT)*2; //pixel number
    config.ovl_config.keyEn = 0;
    config.ovl_config.key = 0xFF;  // color key
    config.ovl_config.aen = 0;     // alpha enable
    config.ovl_config.alpha = 0;

    /* Mirror the same layer setup into the LCD engine registers. */
    LCD_LayerSetAddress(DDP_OVL_LAYER_MUN-1, fbPA);
    LCD_LayerSetFormat(DDP_OVL_LAYER_MUN-1, LCD_LAYER_FORMAT_RGB565);
    LCD_LayerSetOffset(DDP_OVL_LAYER_MUN-1, 0, 0);
    LCD_LayerSetSize(DDP_OVL_LAYER_MUN-1,lcm_params->width,lcm_params->height);
    LCD_LayerSetPitch(DDP_OVL_LAYER_MUN-1, ALIGN_TO(lcm_params->width, MTK_FB_ALIGNMENT) * 2);
    LCD_LayerEnable(DDP_OVL_LAYER_MUN-1, TRUE);

    if(lcm_params->dsi.mode == CMD_MODE)
    {
        config.dstModule = DISP_MODULE_DSI_CMD;// DISP_MODULE_WDMA1
    }
    else
    {
        config.dstModule = DISP_MODULE_DSI_VDO;// DISP_MODULE_WDMA1
    }
    config.outFormat = RDMA_OUTPUT_FORMAT_ARGB;

    /* Video mode: quiesce DSI and take the path mutex with IRQs off before
     * touching the path configuration. */
    if(lcm_params->dsi.mode != CMD_MODE)
    {
        DSI_Wait_VDO_Idle();
        local_irq_save(irq_mask);
        disp_path_get_mutex();
    }

    disp_path_config(&config);

    // Config FB_Layer port to be physical.
    {
        M4U_PORT_STRUCT portStruct;

        portStruct.ePortID = M4U_PORT_LCD_OVL;   //hardware port ID, defined in M4U_PORT_ID_ENUM
        portStruct.Virtuality = 1;
        portStruct.Security = 0;
        portStruct.domain = 3;         //domain : 0 1 2 3
        portStruct.Distance = 1;
        portStruct.Direction = 0;
        m4u_config_port(&portStruct);
    }

    // hook m4u debug callback function
    m4u_set_tf_callback(M4U_CLNTMOD_DISP, &disp_m4u_dump_reg);

    if(lcm_params->dsi.mode != CMD_MODE)
    {
        disp_path_release_mutex();
        DSI_Start();
        local_irq_restore(irq_mask);
    }

    printk("%s, config done\n", __func__);

    return DISP_STATUS_OK;
}
/*
 * get_throw_trampoline:
 *
 *   Generate a call to mono_amd64_throw_exception/
 * mono_amd64_throw_corlib_exception.
 *
 * The generated trampoline saves the full register context on the stack,
 * materializes the icall's three arguments as stack slots (to hide the
 * SysV/Win64 calling convention difference), and tail-calls the icall,
 * which never returns.  Returns the start of the emitted code; when @info
 * is non-NULL, trampoline/unwind metadata is returned through it.
 */
static gpointer
get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
{
	guint8* start;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, stack_size, arg_offsets [16], ctx_offset, regs_offset, dummy_stack_space;
	const guint kMaxCodeSize = 256;

#ifdef TARGET_WIN32
	dummy_stack_space = 6 * sizeof(mgreg_t);	/* Windows expects stack space allocated for all 6 dummy args. */
#else
	dummy_stack_space = 0;
#endif

	start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);

	/* The stack is unaligned on entry */
	stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;

	code = start;

	if (info)
		unwind_ops = mono_arch_get_cie_program ();

	/* Alloc frame */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
	if (info)
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);

	/*
	 * To hide linux/windows calling convention differences, we pass all arguments on
	 * the stack by passing 6 dummy values in registers.
	 */

	/* Stack layout inside the frame: args at the bottom, then the saved
	 * MonoContext at ctx_offset, whose gregs array sits at regs_offset. */
	arg_offsets [0] = dummy_stack_space + 0;
	arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
	arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
	ctx_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
	regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);

	/* Save registers */
	for (i = 0; i < AMD64_NREG; ++i)
		if (i != AMD64_RSP)
			amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
	/* Save RSP (the caller's value, i.e. before our frame + return address). */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
	/* Save IP (the return address pushed by the call into this trampoline). */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
	/* Set arg1 == ctx */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
	amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
	/* Set arg2 == exc/ex_token_index */
	if (resume_unwind)
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof(mgreg_t));
	else
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof(mgreg_t));
	/* Set arg3 == rethrow/pc offset */
	if (resume_unwind) {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
	} else if (corlib) {
		if (llvm_abs)
			/*
			 * The caller doesn't pass in a pc/pc offset, instead we simply use the
			 * caller ip. Negate the pc adjustment done in mono_amd64_throw_corlib_exception ().
			 */
			amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 1, sizeof(mgreg_t));
		else
			amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof(mgreg_t));
	} else {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof(mgreg_t));
	}

	if (aot) {
		/* AOT: the icall address is patched in at load time via a PC-relative load. */
		const char *icall_name;

		if (resume_unwind)
			icall_name = "mono_amd64_resume_unwind";
		else if (corlib)
			icall_name = "mono_amd64_throw_corlib_exception";
		else
			icall_name = "mono_amd64_throw_exception";
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
	}
	amd64_call_reg (code, AMD64_R11);
	/* The icall never returns; trap if it somehow does. */
	amd64_breakpoint (code);

	mono_arch_flush_icache (start, code - start);

	g_assert ((code - start) < kMaxCodeSize);

	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);

	return start;
}
/*
 * c_variant_reserve - reserve aligned buffer space at the front and tail
 * markers of the current variant level, growing the iovec array and
 * allocating new backing buffers as needed.
 *
 * @cv:               variant being built.
 * @n_extra_vecs:     extra iovec slots the caller wants kept free.
 * @front_alignment:  log2 alignment for the front reservation.
 * @front_allocation: bytes to reserve at the front.
 * @frontp:           out: start of the reserved front space (may be NULL).
 * @tail_alignment:   log2 alignment for the tail reservation.
 * @tail_allocation:  bytes to reserve at the tail.
 * @tailp:            out: start of the reserved tail space (may be NULL).
 *
 * Returns 0 on success, a negative error code on failure (in which case the
 * front/tail markers are left untouched).
 */
static int c_variant_reserve(CVariant *cv,
                             size_t n_extra_vecs,
                             size_t front_alignment,
                             size_t front_allocation,
                             void **frontp,
                             size_t tail_alignment,
                             size_t tail_allocation,
                             void **tailp) {
        CVariantLevel *level;
        size_t i, j, n, rem, n_front, n_tail;
        struct iovec *vec_front, *vec_tail;
        void *p;
        int r;

        /*
         * This advances the front and tail markers according to the requested
         * allocation size. If an alignment is given, the start is aligned
         * before the marker is advanced. If required, new buffer space is
         * allocated.
         *
         * On success, a pointer to the start of each reserved buffer space is
         * returned in @frontp and @tailp. On failure, both markers will stay
         * untouched.
         *
         * Note that front-alignment is always according to the global
         * alignment (i.e., it adheres to level->offset (and as such iov_base)
         * rather than level->i_front). But tail-alignment is always local-only
         * (adhering to level->i_tail). There is no global context for tail
         * space, so no way to align it as such.
         */

        /* both are mapped, hence cannot overflow size_t (with alignment) */
        assert(front_allocation + tail_allocation + 16 > front_allocation);

        level = cv->state->levels + cv->state->i_levels;

        /* bytes still needed beyond the current markers, including alignment padding */
        n_front = front_allocation + ALIGN_TO(level->offset, 1 << front_alignment) - level->offset;
        n_tail = tail_allocation + ALIGN_TO(level->i_tail, 1 << tail_alignment) - level->i_tail;
        vec_front = cv->vecs + level->v_front;
        vec_tail = cv->vecs + cv->n_vecs - level->v_tail - 1;

        /*
         * If the remaining space is not enough to fullfill the request, search
         * through the unused vectors, in case there is unused buffer space
         * that is sufficient for the request. If we find one, move it directly
         * next to our current vector, so we can jump over.
         */
        if (n_front > vec_front->iov_len - level->i_front) {
                for (i = 1; vec_front + i < vec_tail; ++i) {
                        if (n_front > (vec_front + i)->iov_len)
                                continue;

                        c_variant_swap_vecs(cv,
                                            (vec_front + i) - cv->vecs,
                                            (vec_front + 1) - cv->vecs);
                        ++vec_front;
                        n_front = 0;    /* reuse found; no new allocation needed */
                        break;
                }
        } else if (n_front > 0) {
                /* fits into @vec_front */
                n_front = 0;
        }

        /* counter-part for tail-allocation */
        if (n_tail > vec_tail->iov_len - level->i_tail) {
                for (i = 1; vec_tail - i > vec_front; ++i) {
                        if (n_tail > (vec_tail - i)->iov_len)
                                continue;

                        c_variant_swap_vecs(cv,
                                            (vec_tail - i) - cv->vecs,
                                            (vec_tail - 1) - cv->vecs);
                        --vec_tail;
                        n_tail = 0;
                        break;
                }
        }  else if (n_tail > 0) {
                /* fits into @vec_tail */
                n_tail = 0;
        }

        /* make sure enough spare iovec slots remain between front and tail */
        n = vec_tail - vec_front - 1;
        if (_unlikely_(n < n_extra_vecs + 2 * !!(n_front || n_tail))) {
                /* remember tail-index since realloc might move it */
                j = vec_front - cv->vecs;
                i = cv->n_vecs - (vec_tail - cv->vecs);

                r = c_variant_insert_vecs(cv, j + 1, n_extra_vecs + 2);
                if (r < 0)
                        return r;

                /* re-calculate vectors, as they might have moved */
                vec_front = cv->vecs + j;
                vec_tail = cv->vecs + cv->n_vecs - i;
        }

        /* if either is non-zero, we need a new buffer allocation */
        if (_unlikely_(n_front || n_tail)) {
                /*
                 * Now that we have the iovecs, we need the actual buffer
                 * space. We start with 2^12 bytes (4k / one page), and
                 * increase it for each allocated buffer by a factor of 2, up
                 * to an arbitrary limit of 2^31.
                 */
                n = 1 << (12 + ((cv->a_vecs > 19) ? 19 : cv->a_vecs));
                if (n < n_front + n_tail + 16)
                        n = n_front + n_tail + 16;

                p = malloc(n);
                if (!p) {
                        /* fall back to the minimum size before giving up */
                        n = n_front + n_tail + 16;
                        p = malloc(n);
                        if (!p)
                                return c_variant_poison(cv, -ENOMEM);
                }

                /* count how often we allocated; protect against overflow */
                if (++cv->a_vecs < 1)
                        --cv->a_vecs;

                /*
                 * NOTE(review): the bytes after cv->vecs[cv->n_vecs] are used
                 * as a per-iovec ownership flag array (true == buffer owned
                 * and must be freed) — assumed allocated alongside cv->vecs;
                 * confirm against the CVariant allocation code.
                 */
                if (n_front) {
                        ++vec_front;
                        if (((char *)(cv->vecs + cv->n_vecs))[vec_front - cv->vecs])
                                free(vec_front->iov_base);
                        vec_front->iov_base = p;
                        vec_front->iov_len = n;
                        ((char *)(cv->vecs + cv->n_vecs))[vec_front - cv->vecs] = true;
                }
                if (n_tail) {
                        --vec_tail;
                        if (((char *)(cv->vecs + cv->n_vecs))[vec_tail - cv->vecs])
                                free(vec_tail->iov_base);
                        vec_tail->iov_base = p;
                        vec_tail->iov_len = n;
                        ((char *)(cv->vecs + cv->n_vecs))[vec_tail - cv->vecs] = true;
                }
                if (n_front && n_tail) {
                        /* if both allocated, we need to split properly */
                        rem = n - n_front - n_tail - 16;
                        vec_front->iov_len = n_front + 8 + (rem * C_VARIANT_FRONT_SHARE / 100);
                        vec_tail->iov_base = (char *)p + vec_front->iov_len;
                        vec_tail->iov_len = n - vec_front->iov_len;
                        /* only the front half owns the single malloc'ed block */
                        ((char *)(cv->vecs + cv->n_vecs))[vec_tail - cv->vecs] = false;
                }
        }

        if (vec_front != cv->vecs + level->v_front) {
                /* vector was updated; clip previous and then advance */
                assert(vec_front - 1 == cv->vecs + level->v_front);
                (vec_front - 1)->iov_len = level->i_front;
                ++level->v_front;
                level->i_front = 0;

                /* front vectors must be aligned according to current offset */
                assert(vec_front->iov_base == ALIGN_PTR_TO(vec_front->iov_base, 8));
                n = level->offset & 7;
                vec_front->iov_base = (char *)vec_front->iov_base + n;
                vec_front->iov_len -= n;
        }
        if (vec_tail != cv->vecs + cv->n_vecs - level->v_tail - 1) {
                /* vector was updated; clip previous and then advance */
                assert(vec_tail + 1 == cv->vecs + cv->n_vecs - level->v_tail - 1);
                (vec_tail + 1)->iov_len = level->i_tail;
                ++level->v_tail;
                level->i_tail = 0;
        }

        /*
         * We are done! Apply alignment before returning a pointer to the
         * reserved space. Then advance the iterators, so the space is actually
         * reserved and will not get re-used.
         */

        /* front alignment padding is zero-filled, per serialization rules */
        n = ALIGN_TO(level->offset, 1 << front_alignment) - level->offset;
        memset((char *)vec_front->iov_base + level->i_front, 0, n);
        level->i_front += n;
        level->offset += n;
        level->i_tail = ALIGN_TO(level->i_tail, 1 << tail_alignment);

        if (frontp)
                *frontp = (char *)vec_front->iov_base + level->i_front;
        if (tailp)
                *tailp = (char *)vec_tail->iov_base + level->i_tail;

        level->i_front += front_allocation;
        level->offset += front_allocation;
        level->i_tail += tail_allocation;
        return 0;
}
/*
 * dpi_init - bring up the DPI display path.
 * @fbVA:        frame buffer virtual address (unused on the new-disp path)
 * @fbPA:        frame buffer physical address, programmed into the OVL layer
 * @isLcmInited: TRUE if the bootloader already initialized the LCM, in which
 *               case lcm_drv->init() is skipped
 *
 * Configures the OVL->DPI0 path (or RDMA source in decouple mode), sets the
 * framebuffer layer to RGB565 with a 32-pixel-aligned pitch, then powers up
 * the PLL/pads and the panel.  Returns DISP_STATUS_OK on success.
 */
static DISP_STATUS dpi_init(UINT32 fbVA, UINT32 fbPA, BOOL isLcmInited)
{
	if (!disp_drv_dpi_init_context())
		return DISP_STATUS_NOT_IMPLEMENTED;

#ifndef MT65XX_NEW_DISP
	init_intermediate_buffers(fbPA);
#else
	{
		struct disp_path_config_struct config = { 0 };

		/* In decouple mode the path is fed from RDMA instead of OVL. */
		if (DISP_IsDecoupleMode())
			config.srcModule = DISP_MODULE_RDMA;
		else
			config.srcModule = DISP_MODULE_OVL;

		/* Background and source ROIs both cover the full screen. */
		config.bgROI.x = 0;
		config.bgROI.y = 0;
		config.bgROI.width = DISP_GetScreenWidth();
		config.bgROI.height = DISP_GetScreenHeight();
		config.bgColor = 0x0;	/* background color */
		config.srcROI.x = 0;
		config.srcROI.y = 0;
		config.srcROI.height = DISP_GetScreenHeight();
		config.srcROI.width = DISP_GetScreenWidth();
		config.ovl_config.source = OVL_LAYER_SOURCE_MEM;

#if 0
		/* Disable all layers. */
		/* First disable FB_Layer. */
		disp_path_get_mutex();
		config.ovl_config.layer = 0;
		config.ovl_config.layer_en = 0;
		disp_path_config_layer(&config.ovl_config);
		config.ovl_config.layer = 1;
		disp_path_config_layer(&config.ovl_config);
		config.ovl_config.layer = 2;
		disp_path_config_layer(&config.ovl_config);
		config.ovl_config.layer = 3;
		disp_path_config_layer(&config.ovl_config);
		disp_path_release_mutex();
		disp_path_wait_reg_update();
#endif

		/* Config FB_Layer port to be virtual (goes through the M4U). */
		{
			M4U_PORT_STRUCT portStruct;

			portStruct.ePortID = DISP_OVL_0;	/* hardware port ID, defined in M4U_PORT_ID_ENUM */
			portStruct.Virtuality = 1;
			portStruct.Security = 0;
			portStruct.domain = 3;	/* domain : 0 1 2 3 */
			portStruct.Distance = 1;
			portStruct.Direction = 0;
			m4u_config_port(&portStruct);
		}

		/* Reconfig FB_Layer and enable it. */
		config.ovl_config.layer = FB_LAYER;
		config.ovl_config.layer_en = 1;
		config.ovl_config.fmt = eRGB565;
		config.ovl_config.addr = fbPA;
		config.ovl_config.source = OVL_LAYER_SOURCE_MEM;
		config.ovl_config.src_x = 0;
		config.ovl_config.src_y = 0;
		config.ovl_config.dst_x = 0;	/* ROI */
		config.ovl_config.dst_y = 0;
		config.ovl_config.dst_w = DISP_GetScreenWidth();
		config.ovl_config.dst_h = DISP_GetScreenHeight();
		/* Pitch in bytes: width aligned to 32 pixels, 2 bytes/pixel (RGB565). */
		config.ovl_config.src_pitch = ALIGN_TO(DISP_GetScreenWidth(), 32) * 2;
		config.ovl_config.keyEn = 0;
		config.ovl_config.key = 0xFF;	/* color key */
		config.ovl_config.aen = 0;	/* alpha enable */
		config.ovl_config.alpha = 0;

		/* Mirror the same layer setup into the LCD controller registers. */
		LCD_LayerSetAddress(FB_LAYER, fbPA);
		LCD_LayerSetFormat(FB_LAYER, LCD_LAYER_FORMAT_RGB565);
		LCD_LayerSetOffset(FB_LAYER, 0, 0);
		LCD_LayerSetSize(FB_LAYER, DISP_GetScreenWidth(), DISP_GetScreenHeight());
		LCD_LayerSetPitch(FB_LAYER, ALIGN_TO(DISP_GetScreenWidth(), 32) * 2);
		LCD_LayerEnable(FB_LAYER, TRUE);

		config.dstModule = DISP_MODULE_DPI0;	/* DISP_MODULE_WDMA1 */
		config.outFormat = RDMA_OUTPUT_FORMAT_ARGB;

		disp_path_get_mutex();
		disp_path_config(&config);
		disp_path_release_mutex();
	}
#endif

	init_mipi_pll();
	init_io_pad();
	init_io_driving_current();
	init_lcd();
	init_dpi(isLcmInited);

	if (NULL != lcm_drv->init && !isLcmInited)
		lcm_drv->init();

	/* NOTE(review): DSI/UFOE are powered on and immediately off again;
	 * presumably a one-shot init toggle of those blocks -- confirm. */
	DSI_PowerOn();
	DSI_PowerOff();
	UFOE_PowerOn();
	UFOE_PowerOff();

#ifdef SPM_SODI_ENABLED
	spm_sodi_lcm_video_mode(TRUE);
#endif

	return DISP_STATUS_OK;
}
/*
 * mono_arch_get_call_filter:
 *
 * Generate (ARM64) machine code for the "call filter" helper used during
 * exception handling: called from C with a MonoContext* in r0 and the filter
 * address in r1, it saves the caller's callee-saved registers, installs the
 * register state from the context, calls the filter, writes the registers
 * back into the context and returns the filter's result (left in r0).
 * Returns a pointer to the generated code; fills *info when requested.
 */
gpointer mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code;
	guint8* start;
	int size, offset, gregs_offset, fregs_offset, ctx_offset, num_fregs, frame_size;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	size = 512;
	start = code = mono_global_codeman_reserve (size);

	/* Compute stack frame size and offsets */
	offset = 0;
	/* frame block */
	offset += 2 * 8;
	/* gregs */
	gregs_offset = offset;
	offset += 32 * 8;
	/* fregs */
	num_fregs = 8;
	fregs_offset = offset;
	offset += num_fregs * 8;
	/* NOTE(review): ctx_offset ends up 8 bytes past 'offset' without growing
	 * the frame; the slot appears to live in the ALIGN_TO padding -- confirm. */
	ctx_offset = offset;
	ctx_offset += 8;
	frame_size = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);

	/*
	 * We are being called from C code, ctx is in r0, the address to call is in r1.
	 * We need to save state, restore ctx, make the call, then restore the previous state,
	 * returning the value returned by the call.
	 */
	/* Setup a frame */
	arm_stpx_pre (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, -frame_size);
	arm_movspx (code, ARMREG_FP, ARMREG_SP);

	/* Save ctx */
	arm_strx (code, ARMREG_R0, ARMREG_FP, ctx_offset);
	/* Save gregs */
	code = mono_arm_emit_store_regarray (code, MONO_ARCH_CALLEE_SAVED_REGS | (1 << ARMREG_FP), ARMREG_FP, gregs_offset);
	/* No need to save/restore fregs, since we don't currently use them */

	/* Load regs from ctx */
	code = mono_arm_emit_load_regarray (code, MONO_ARCH_CALLEE_SAVED_REGS, ARMREG_R0, MONO_STRUCT_OFFSET (MonoContext, regs));
	/* Load fp */
	arm_ldrx (code, ARMREG_FP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoContext, regs) + (ARMREG_FP * 8));

	/* Make the call */
	arm_blrx (code, ARMREG_R1);
	/* For filters, the result is in R0 */

	/* Restore fp (fp was clobbered above, so reload it relative to sp) */
	arm_ldrx (code, ARMREG_FP, ARMREG_SP, gregs_offset + (ARMREG_FP * 8));
	/* Load ctx */
	arm_ldrx (code, ARMREG_IP0, ARMREG_FP, ctx_offset);
	/* Save registers back to ctx */
	/* This isn't strictly neccessary since we don't allocate variables used in eh clauses to registers */
	code = mono_arm_emit_store_regarray (code, MONO_ARCH_CALLEE_SAVED_REGS, ARMREG_IP0, MONO_STRUCT_OFFSET (MonoContext, regs));

	/* Restore regs */
	code = mono_arm_emit_load_regarray (code, MONO_ARCH_CALLEE_SAVED_REGS, ARMREG_FP, gregs_offset);
	/* Destroy frame */
	code = mono_arm_emit_destroy_frame (code, frame_size, (1 << ARMREG_IP0));
	arm_retx (code, ARMREG_LR);

	g_assert ((code - start) < size);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);

	return start;
}
/*
 * Serialize a camera_metadata buffer into a Parcel.
 *
 * Writes: blobSize (int32; 0 for NULL metadata, in which case nothing else
 * follows), the blob itself (front padding + compacted metadata + back
 * padding), and the offset (int32) of the metadata within the blob.  The
 * blob is over-allocated by one alignment unit because the aligned start
 * inside the blob is only known after writeBlob().
 * Returns OK on success, or the first failing Parcel write's error code.
 */
status_t CameraMetadata::writeToParcel(Parcel& data,
                                       const camera_metadata_t* metadata) {
    status_t res = OK;

    /**
     * Below is the camera metadata parcel layout:
     *
     * |--------------------------------------------|
     * |             arg0: blobSize                 |
     * |              (length = 4)                  |
     * |--------------------------------------------|<--Skip the rest if blobSize == 0.
     * |                                            |
     * |                                            |
     * |              arg1: blob                    |
     * | (length = variable, see arg1 layout below) |
     * |                                            |
     * |                                            |
     * |--------------------------------------------|
     * |              arg2: offset                  |
     * |              (length = 4)                  |
     * |--------------------------------------------|
     */

    // arg0 = blobSize (int32)
    if (metadata == NULL) {
        // Write zero blobSize for null metadata.
        return data.writeInt32(0);
    }

    /**
     * Always make the blob size sufficiently larger, as we need put alignment
     * padding and metadata into the blob. Since we don't know the alignment
     * offset before writeBlob. Then write the metadata to aligned offset.
     */
    const size_t metadataSize = get_camera_metadata_compact_size(metadata);
    const size_t alignment = get_camera_metadata_alignment();
    const size_t blobSize = metadataSize + alignment;
    res = data.writeInt32(static_cast<int32_t>(blobSize));
    if (res != OK) {
        return res;
    }

    size_t offset = 0;
    /**
     * arg1 = metadata (blob).
     *
     * The blob size is the sum of front padding size, metadata size and back padding
     * size, which is equal to metadataSize + alignment.
     *
     * The blob layout is:
     * |------------------------------------|<----Start address of the blob (unaligned).
     * |           front padding            |
     * |          (size = offset)           |
     * |------------------------------------|<----Aligned start address of metadata.
     * |                                    |
     * |                                    |
     * |            metadata                |
     * |       (size = metadataSize)        |
     * |                                    |
     * |                                    |
     * |------------------------------------|
     * |           back padding             |
     * |     (size = alignment - offset)    |
     * |------------------------------------|<----End address of blob.
     *                                           (Blob start address + blob size).
     */
    WritableBlob blob;
    do {
        res = data.writeBlob(blobSize, false, &blob);
        if (res != OK) {
            break;
        }
        // Round the blob's start address up to the metadata alignment.
        const uintptr_t metadataStart = ALIGN_TO(blob.data(), alignment);
        offset = metadataStart - reinterpret_cast<uintptr_t>(blob.data());
        ALOGV("%s: alignment is: %zu, metadata start: %p, offset: %zu",
                __FUNCTION__, alignment,
                reinterpret_cast<const void *>(metadataStart), offset);
        copy_camera_metadata(reinterpret_cast<void*>(metadataStart), metadataSize, metadata);

        // Not too big of a problem since receiving side does hard validation
        // Don't check the size since the compact size could be larger
        if (validate_camera_metadata_structure(metadata, /*size*/NULL) != OK) {
            ALOGW("%s: Failed to validate metadata %p before writing blob",
                   __FUNCTION__, metadata);
        }

    } while(false);
    blob.release();

    // arg2 = offset (int32)
    res = data.writeInt32(static_cast<int32_t>(offset));

    return res;
}
/*
 * get_throw_trampoline:
 *
 * Generate (ARM64) machine code for the exception-throw trampolines.  The
 * generated code is entered from JITted code with the exception object (or
 * corlib type token) in r0; it captures the caller's full register state
 * into the stack frame and tail-calls the C helper
 * (mono_arm_throw_exception / mono_arm_resume_unwind), which unwinds and
 * never returns (a brk follows the call as a guard).
 *
 * @size:          bytes to reserve for the generated code
 * @corlib:        r0 holds a corlib type token (real LR is then in r1)
 * @rethrow:       generate the rethrow variant (passed to the C helper)
 * @llvm:          caller is LLVM-compiled code (LR already in the gregs slot)
 * @resume_unwind: call mono_arm_resume_unwind instead of throw
 * @tramp_name:    name recorded in the returned MonoTrampInfo
 */
static gpointer get_throw_trampoline (int size, gboolean corlib, gboolean rethrow, gboolean llvm, gboolean resume_unwind, const char *tramp_name, MonoTrampInfo **info, gboolean aot)
{
	guint8 *start, *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, offset, gregs_offset, fregs_offset, frame_size, num_fregs;

	code = start = mono_global_codeman_reserve (size);

	/* We are being called by JITted code, the exception object/type token is in R0 */

	/* Compute stack frame size and offsets */
	offset = 0;
	/* frame block */
	offset += 2 * 8;
	/* gregs */
	gregs_offset = offset;
	offset += 32 * 8;
	/* fregs */
	num_fregs = 8;
	fregs_offset = offset;
	offset += num_fregs * 8;
	frame_size = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);

	/* Setup a frame */
	arm_stpx_pre (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, -frame_size);
	arm_movspx (code, ARMREG_FP, ARMREG_SP);

	/* Save gregs */
	code = mono_arm_emit_store_regarray (code, 0xffffffff, ARMREG_FP, gregs_offset);
	if (corlib && !llvm)
		/* The real LR is in R1 */
		arm_strx (code, ARMREG_R1, ARMREG_FP, gregs_offset + (ARMREG_LR * 8));
	/* Save fp/sp: reload the caller's fp from the frame block, and
	 * reconstruct the caller's sp as fp + frame_size. */
	arm_ldrx (code, ARMREG_IP0, ARMREG_FP, 0);
	arm_strx (code, ARMREG_IP0, ARMREG_FP, gregs_offset + (ARMREG_FP * 8));
	arm_addx_imm (code, ARMREG_IP0, ARMREG_FP, frame_size);
	arm_strx (code, ARMREG_IP0, ARMREG_FP, gregs_offset + (ARMREG_SP * 8));
	/* Save fregs (callee-saved d8-d15) */
	for (i = 0; i < num_fregs; ++i)
		arm_strfpx (code, ARMREG_D8 + i, ARMREG_FP, fregs_offset + (i * 8));

	/* Call the C trampoline function */
	/* Arg1 = exception object/type token */
	arm_movx (code, ARMREG_R0, ARMREG_R0);
	/* Arg2 = caller ip */
	if (corlib) {
		if (llvm)
			arm_ldrx (code, ARMREG_R1, ARMREG_FP, gregs_offset + (ARMREG_LR * 8));
		else
			arm_movx (code, ARMREG_R1, ARMREG_R1);
	} else {
		/* Non-corlib case: caller ip is the saved LR in the frame block. */
		arm_ldrx (code, ARMREG_R1, ARMREG_FP, 8);
	}
	/* Arg 3 = gregs */
	arm_addx_imm (code, ARMREG_R2, ARMREG_FP, gregs_offset);
	/* Arg 4 = fregs */
	arm_addx_imm (code, ARMREG_R3, ARMREG_FP, fregs_offset);
	/* Arg 5 = corlib */
	arm_movzx (code, ARMREG_R4, corlib ? 1 : 0, 0);
	/* Arg 6 = rethrow */
	arm_movzx (code, ARMREG_R5, rethrow ? 1 : 0, 0);

	/* Call the function */
	if (aot) {
		const char *icall_name;

		if (resume_unwind)
			icall_name = "mono_arm_resume_unwind";
		else
			icall_name = "mono_arm_throw_exception";

		code = mono_arm_emit_aotconst (&ji, code, start, ARMREG_LR, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
	} else {
		gpointer icall_func;

		if (resume_unwind)
			icall_func = mono_arm_resume_unwind;
		else
			icall_func = mono_arm_throw_exception;

		code = mono_arm_emit_imm64 (code, ARMREG_LR, (guint64)icall_func);
	}
	arm_blrx (code, ARMREG_LR);
	/* This shouldn't return */
	arm_brk (code, 0x0);

	g_assert ((code - start) < size);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);

	return start;
}
void hdmi_update_impl(void) { HDMI_LOG("hdmi_update_impl\n"); int t = 0; //int ret = 0; //MdpkBitbltConfig pmdp; //int lcm_physical_rotation = 0; int pixelSize = p->hdmi_width * p->hdmi_height; int dataSize = pixelSize * hdmi_bpp; RET_VOID_IF_NOLOG(p->output_mode == HDMI_OUTPUT_MODE_DPI_BYPASS); if (pixelSize == 0) { HDMI_LOG("ignored[resolution is null]\n"); return; } //HDMI_FUNC(); if (down_interruptible(&hdmi_update_mutex)) { HDMI_LOG("[HDMI] can't get semaphore in\n"); return; } if (IS_HDMI_NOT_ON()) { goto done; } if (IS_HDMI_IN_VIDEO_MODE()) { goto done; } DBG_OnTriggerHDMI(); //LCD_WaitForNotBusy(); if (temp_va != 0 && hdmi_va != 0) { DdpkBitbltConfig pddp; int dstOffset; memset((void *)&pddp, 0, sizeof(DdpkBitbltConfig)); pddp.srcX = pddp.srcY = 0; pddp.srcW = p->lcm_width; pddp.srcH = p->lcm_height; pddp.srcWStride = p->lcm_width; pddp.srcHStride = p->lcm_height; pddp.srcAddr[0] = temp_va; pddp.srcFormat = eRGB888_K; pddp.srcBufferSize[0] = p->lcm_width * p->lcm_height * 3; pddp.srcPlaneNum = 1; pddp.dstX = 0; pddp.dstY = 0; pddp.dstFormat = eARGB8888_K; pddp.pitch = p->hdmi_width; pddp.dstWStride = p->hdmi_width; pddp.dstHStride = p->hdmi_height; pddp.dstPlaneNum = 1; pddp.orientation = 0; switch (pddp.orientation) { case 90: case 270: #if 1 { pddp.dstW = ALIGN_TO(p->lcm_height * p->hdmi_height / p->lcm_width * 95 / 100, 4); pddp.dstH = ALIGN_TO(p->hdmi_height * 95 / 100, 4); break; } #endif // fall through now case 0: case 180: { pddp.dstW = ALIGN_TO(p->hdmi_width * 95 / 100, 4); pddp.dstH = ALIGN_TO(p->hdmi_height * 95 / 100, 4); break; } default: HDMI_LOG("Unknown orientation %d\n", pddp.orientation); return; } dstOffset = (p->hdmi_height - pddp.dstH) / 2 * p->hdmi_width * hdmi_bpp + (p->hdmi_width - pddp.dstW) / 2 * hdmi_bpp; pddp.dstAddr[0] = hdmi_va;// + hdmi_buffer_write_id * p->hdmi_width * p->hdmi_height * hdmi_bpp + dstOffset; pddp.dstBufferSize[0] = p->hdmi_width * p->hdmi_height * hdmi_bpp; t = get_current_time_us(); 
DDPK_Bitblt_Config(DDPK_CH_HDMI_0, &pddp); DDPK_Bitblt(DDPK_CH_HDMI_0); } //HDMI_LOG("dstw=%d, dsth=%d, ori=%d\n", p.dstW, p.dstH, p.orientation); DBG_OnHDMIDone(); HDMI_LOG("cost %d us\n", get_current_time_us() - t); //hdmi_buffer_read_id = hdmi_buffer_write_id; //hdmi_buffer_write_id = (hdmi_buffer_write_id+1) % hdmi_params->intermediat_buffer_num; done: up(&hdmi_update_mutex); return; }
int bus_gvariant_get_size(const char *signature) { const char *p; int sum = 0, r; /* For fixed size structs. Fails for variable size structs. */ p = signature; while (*p != 0) { size_t n; r = signature_element_length(p, &n); if (r < 0) return r; else { char t[n+1]; memcpy(t, p, n); t[n] = 0; r = bus_gvariant_get_alignment(t); if (r < 0) return r; sum = ALIGN_TO(sum, r); } switch (*p) { case SD_BUS_TYPE_BOOLEAN: case SD_BUS_TYPE_BYTE: sum += 1; break; case SD_BUS_TYPE_INT16: case SD_BUS_TYPE_UINT16: sum += 2; break; case SD_BUS_TYPE_INT32: case SD_BUS_TYPE_UINT32: case SD_BUS_TYPE_UNIX_FD: sum += 4; break; case SD_BUS_TYPE_INT64: case SD_BUS_TYPE_UINT64: case SD_BUS_TYPE_DOUBLE: sum += 8; break; case SD_BUS_TYPE_STRUCT_BEGIN: case SD_BUS_TYPE_DICT_ENTRY_BEGIN: { char t[n-1]; memcpy(t, p + 1, n - 2); t[n - 2] = 0; r = bus_gvariant_get_size(t); if (r < 0) return r; sum += r; break; } case SD_BUS_TYPE_STRING: case SD_BUS_TYPE_OBJECT_PATH: case SD_BUS_TYPE_SIGNATURE: case SD_BUS_TYPE_ARRAY: case SD_BUS_TYPE_VARIANT: return -EINVAL; default: assert_not_reached("Unknown signature type"); } p += n; } r = bus_gvariant_get_alignment(signature); if (r < 0) return r; return ALIGN_TO(sum, r); }
int isp_resize_mem_data(struct isp_mem_resize_data *data) { int i; int ret = -1; struct isp_mem_resize_data *presizer_user = \ (struct isp_mem_resize_data *)data; u32 input_buffer_size, output_buffer_size; u32 input_nr_pages, output_nr_pages; struct page **input_pages = NULL; struct page **output_pages = NULL; unsigned long isp_addr_in = 0; unsigned long isp_addr_out = 0; struct isp_mem_resize_data resizer_param; unsigned long timeout; if (presizer_user == NULL) { printk(KERN_ERR "ISP_RESZ_ERR : Invalid user data\n"); return -EINVAL; } memcpy(&resizer_param, presizer_user, \ sizeof(struct isp_mem_resize_data)); DPRINTK_ISPPROC("\nRSZ input(%d-%d) - output(%d-%d)\n", resizer_param.input_width, resizer_param.input_height, resizer_param.output_width, resizer_param.output_height); DPRINTK_ISPPROC("RSZ start(%d-%d) - end(%d-%d)\n", resizer_param.left, resizer_param.top, resizer_param.crop_width, resizer_param.crop_height); if (presizer_user->datain == 0 || presizer_user->dataout == 0) return -EINVAL; ispresizer_enable(0); timeout = jiffies + msecs_to_jiffies(200); while (ispresizer_busy()) { if (time_after(jiffies, timeout)) return -EINVAL; msleep(1); } ispresizer_save_context(); ispresizer_free(); ispresizer_request(); /* set data path before configuring modules. 
*/ ispresizer_config_datapath(RSZ_MEM_YUV, 0); input_buffer_size = ALIGN_TO(presizer_user->input_width* \ presizer_user->input_height*2 , 0x100); input_pages = map_user_memory_to_kernel(presizer_user->datain, input_buffer_size, &input_nr_pages); if (input_pages == NULL) { ret = -EINVAL; printk(KERN_ERR "ISP_RESZ_ERR: memory allocation failed\n"); goto exit_cleanup; } output_buffer_size = ALIGN_TO(presizer_user->output_width* \ presizer_user->output_height*2, 0x1000); output_pages = map_user_memory_to_kernel(presizer_user->dataout, output_buffer_size, &output_nr_pages); if (output_pages == NULL) { ret = -EINVAL; printk(KERN_ERR "ISP_RESZ_ERR: memory allocation failed\n"); goto exit_cleanup; } for (i = 0; i < output_nr_pages; ++i) flush_dcache_page(output_pages[i]); isp_addr_in = ispmmu_vmap_pages(input_pages, input_nr_pages); if (IS_ERR((void *)isp_addr_in)) { isp_addr_in = 0; ret = -EINVAL; printk(KERN_ERR "ISP_RESZ_ERR: isp mmu map failed\n"); goto exit_cleanup; } isp_addr_out = ispmmu_vmap_pages(output_pages, output_nr_pages); if (IS_ERR((void *)isp_addr_out)) { isp_addr_out = 0; ret = -EINVAL; printk(KERN_ERR "ISP_RESZ_ERR: isp mmu map failed\n"); goto exit_cleanup; } if ((resizer_param.left == 0) && (resizer_param.top == 0)) { ret = ispresizer_try_size(&resizer_param.input_width, &resizer_param.input_height, &resizer_param.output_width, &resizer_param.output_height); ret = ispresizer_config_size(resizer_param.input_width, resizer_param.input_height, resizer_param.output_width, resizer_param.output_height); ispresizer_set_inaddr(isp_addr_in); } else { ispresizer_trycrop(resizer_param.left, resizer_param.top, resizer_param.crop_width, resizer_param.crop_height, resizer_param.output_width, resizer_param.output_height); ispresizer_applycrop(); /*pixel alignment in 32bit space, vertical must be 0 per TRM */ isp_reg_writel(((resizer_param.left%16) << ISPRSZ_IN_START_HORZ_ST_SHIFT) | (0 << ISPRSZ_IN_START_VERT_ST_SHIFT), OMAP3_ISP_IOMEM_RESZ, ISPRSZ_IN_START); /* 
Align input address for cropping, per TRM */ ispresizer_set_inaddr(isp_addr_in + (resizer_param.top*resizer_param.input_width*2) + ((resizer_param.left/16)*32)); } ispresizer_set_inaddr(isp_addr_in); ispresizer_set_outaddr(isp_addr_out); ispresizer_config_ycpos(0); ispresizer_config_inlineoffset( ALIGN_TO(presizer_user->input_width*2, 32)); isp_set_callback(CBK_RESZ_DONE, rsz_isr, (void *) NULL, (void *)NULL); isp_reg_writel(0xFFFFFFFF, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS); isp_wfc.done = 0; /* start resizer engine. */ ispresizer_enable(1); ret = wait_for_completion_timeout(&isp_wfc, msecs_to_jiffies(1000)); if (!ret) ispresizer_enable(0); timeout = jiffies + msecs_to_jiffies(50); while (ispresizer_busy()) { msleep(5); if (time_after(jiffies, timeout)) { printk(KERN_ERR "ISP_RESZ_ERR: Resizer still busy"); break; } } isp_reg_writel(0xFFFFFFFF, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS); isp_unset_callback(CBK_RESZ_DONE); ret = 0; exit_cleanup: ispresizer_restore_context(); if (isp_addr_in != 0) ispmmu_vunmap(isp_addr_in); if (isp_addr_out != 0) ispmmu_vunmap(isp_addr_out); if (input_pages != NULL) { unmap_user_memory_from_kernel(input_pages, input_nr_pages); kfree(input_pages); } if (output_pages != NULL) { unmap_user_memory_from_kernel(output_pages, output_nr_pages); kfree(output_pages); } DPRINTK_ISPPROC("resizer exit.\n"); return ret; }
/*
 * mono_arch_create_generic_trampoline:
 *
 * Generate (x86) machine code for one of Mono's generic trampolines.  The
 * emitted stub: builds a frame, snapshots all registers into a register
 * array, constructs and pushes a MonoLMF, calls the C trampoline function
 * for @tramp_type, pops the LMF, checks for a pending exception (branching
 * to a rethrow path that reuses OP_THROW's trampoline), and finally either
 * returns the trampoline's value or jumps to the resolved target address.
 * Returns the start of the generated code; always fills *info.
 */
guchar* mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	const char *tramp_name;
	guint8 *buf, *code, *tramp, *br_ex_check;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	int i, offset, frame_size, regarray_offset, lmf_offset, caller_ip_offset, arg_offset;
	int cfa_offset; /* cfa = cfa_reg + cfa_offset */

	code = buf = mono_global_codeman_reserve (256);

	/* Note that there is a single argument to the trampoline
	 * and it is stored at: esp + pushed_args * sizeof (target_mgreg_t)
	 * the ret address is at: esp + (pushed_args + 1) * sizeof (target_mgreg_t)
	 */

	/* Compute frame offsets relative to the frame pointer %ebp */
	arg_offset = sizeof (target_mgreg_t);
	caller_ip_offset = 2 * sizeof (target_mgreg_t);
	offset = 0;
	offset += sizeof (MonoLMF);
	lmf_offset = -offset;
	offset += X86_NREG * sizeof (target_mgreg_t);
	regarray_offset = -offset;
	/* Argument area */
	offset += 4 * sizeof (target_mgreg_t);
	frame_size = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);

	/* ret addr and arg are on the stack */
	cfa_offset = 2 * sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
	/* IP saved at CFA - 4 (X86_NREG is the pseudo-register index for eip) */
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_NREG, -4);

	/* Allocate frame */
	x86_push_reg (code, X86_EBP);
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_EBP, -cfa_offset);

	x86_mov_reg_reg (code, X86_EBP, X86_ESP);
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, X86_EBP);

	/* There are three words on the stack, adding + 4 aligns the stack to 16, which is needed on osx */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, frame_size + sizeof (target_mgreg_t));

	/* Save all registers */
	for (i = X86_EAX; i <= X86_EDI; ++i) {
		int reg = i;

		if (i == X86_EBP) {
			/* Save original ebp */
			/* EAX is already saved */
			x86_mov_reg_membase (code, X86_EAX, X86_EBP, 0, sizeof (target_mgreg_t));
			reg = X86_EAX;
		} else if (i == X86_ESP) {
			/* Save original esp */
			/* EAX is already saved */
			x86_mov_reg_reg (code, X86_EAX, X86_EBP);
			/* Saved ebp + trampoline arg + return addr */
			x86_alu_reg_imm (code, X86_ADD, X86_EAX, 3 * sizeof (target_mgreg_t));
			reg = X86_EAX;
		}
		x86_mov_membase_reg (code, X86_EBP, regarray_offset + (i * sizeof (target_mgreg_t)), reg, sizeof (target_mgreg_t));
	}

	/* Setup LMF */
	/* eip */
	if (tramp_type == MONO_TRAMPOLINE_JUMP) {
		x86_mov_membase_imm (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, eip), 0, sizeof (target_mgreg_t));
	} else {
		x86_mov_reg_membase (code, X86_EAX, X86_EBP, caller_ip_offset, sizeof (target_mgreg_t));
		x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, eip), X86_EAX, sizeof (target_mgreg_t));
	}
	/* method */
	if ((tramp_type == MONO_TRAMPOLINE_JIT) || (tramp_type == MONO_TRAMPOLINE_JUMP)) {
		x86_mov_reg_membase (code, X86_EAX, X86_EBP, arg_offset, sizeof (target_mgreg_t));
		x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), X86_EAX, sizeof (target_mgreg_t));
	} else {
		x86_mov_membase_imm (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), 0, sizeof (target_mgreg_t));
	}
	/* esp */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_ESP * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, esp), X86_EAX, sizeof (target_mgreg_t));
	/* callee save registers */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_EBX * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebx), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_EDI * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, edi), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_ESI * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, esi), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_EBP * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp), X86_EAX, sizeof (target_mgreg_t));

	/* Push LMF */
	/* get the address of lmf for the current thread */
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
		x86_call_reg (code, X86_EAX);
	} else {
		x86_call_code (code, mono_get_lmf_addr);
	}
	/* lmf->lmf_addr = lmf_addr (%eax) */
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), X86_EAX, sizeof (target_mgreg_t));
	/* lmf->previous_lmf = *(lmf_addr) */
	x86_mov_reg_membase (code, X86_ECX, X86_EAX, 0, sizeof (target_mgreg_t));
	/* Signal to mono_arch_unwind_frame () that this is a trampoline frame */
	x86_alu_reg_imm (code, X86_ADD, X86_ECX, 1);
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), X86_ECX, sizeof (target_mgreg_t));
	/* *lmf_addr = lmf */
	x86_lea_membase (code, X86_ECX, X86_EBP, lmf_offset);
	x86_mov_membase_reg (code, X86_EAX, 0, X86_ECX, sizeof (target_mgreg_t));

	/* Call trampoline function */
	/* Arg 1 - registers */
	x86_lea_membase (code, X86_EAX, X86_EBP, regarray_offset);
	x86_mov_membase_reg (code, X86_ESP, (0 * sizeof (target_mgreg_t)), X86_EAX, sizeof (target_mgreg_t));
	/* Arg2 - calling code */
	if (tramp_type == MONO_TRAMPOLINE_JUMP) {
		x86_mov_membase_imm (code, X86_ESP, (1 * sizeof (target_mgreg_t)), 0, sizeof (target_mgreg_t));
	} else {
		x86_mov_reg_membase (code, X86_EAX, X86_EBP, caller_ip_offset, sizeof (target_mgreg_t));
		x86_mov_membase_reg (code, X86_ESP, (1 * sizeof (target_mgreg_t)), X86_EAX, sizeof (target_mgreg_t));
	}
	/* Arg3 - trampoline argument */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, arg_offset, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, (2 * sizeof (target_mgreg_t)), X86_EAX, sizeof (target_mgreg_t));
	/* Arg4 - trampoline address */
	// FIXME:
	x86_mov_membase_imm (code, X86_ESP, (3 * sizeof (target_mgreg_t)), 0, sizeof (target_mgreg_t));

#ifdef __APPLE__
	/* check the stack is aligned after the ret ip is pushed */
	/*
	x86_mov_reg_reg (code, X86_EDX, X86_ESP);
	x86_alu_reg_imm (code, X86_AND, X86_EDX, 15);
	x86_alu_reg_imm (code, X86_CMP, X86_EDX, 0);
	x86_branch_disp (code, X86_CC_Z, 3, FALSE);
	x86_breakpoint (code);
	*/
#endif

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_TRAMPOLINE_FUNC_ADDR, GINT_TO_POINTER (tramp_type));
		x86_call_reg (code, X86_EAX);
	} else {
		tramp = (guint8*)mono_get_trampoline_func (tramp_type);
		x86_call_code (code, tramp);
	}

	/*
	 * Overwrite the trampoline argument with the address we need to jump to,
	 * to free %eax.
	 */
	x86_mov_membase_reg (code, X86_EBP, arg_offset, X86_EAX, 4);

	/* Restore LMF */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_ECX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof (target_mgreg_t));
	/* Undo the +1 tag added when the LMF was pushed. */
	x86_alu_reg_imm (code, X86_SUB, X86_ECX, 1);
	x86_mov_membase_reg (code, X86_EAX, 0, X86_ECX, sizeof (target_mgreg_t));

	/* Check for interruptions */
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint_noraise");
		x86_call_reg (code, X86_EAX);
	} else {
		x86_call_code (code, (guint8*)mono_thread_force_interruption_checkpoint_noraise);
	}

	x86_test_reg_reg (code, X86_EAX, X86_EAX);
	br_ex_check = code;
	x86_branch8 (code, X86_CC_Z, -1, 1);

	/*
	 * Exception case:
	 * We have an exception we want to throw in the caller's frame, so pop
	 * the trampoline frame and throw from the caller.
	 */
	x86_leave (code);
	/*
	 * The exception is in eax.
	 * We are calling the throw trampoline used by OP_THROW, so we have to setup the
	 * stack to look the same.
	 * The stack contains the ret addr, and the trampoline argument, the throw trampoline
	 * expects it to contain the ret addr and the exception. It also needs to be aligned
	 * after the exception is pushed.
	 */
	/* Align stack */
	x86_push_reg (code, X86_EAX);
	/* Push the exception */
	x86_push_reg (code, X86_EAX);
	//x86_breakpoint (code);
	/* Push the original return value */
	x86_push_membase (code, X86_ESP, 3 * 4);
	/*
	 * EH is initialized after trampolines, so get the address of the variable
	 * which contains throw_exception, and load it from there.
	 */
	if (aot) {
		/* Not really a jit icall */
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "rethrow_preserve_exception_addr");
	} else {
		x86_mov_reg_imm (code, X86_ECX, (guint8*)mono_get_rethrow_preserve_exception_addr ());
	}
	x86_mov_reg_membase (code, X86_ECX, X86_ECX, 0, sizeof (target_mgreg_t));
	x86_jump_reg (code, X86_ECX);

	/* Normal case */
	mono_x86_patch (br_ex_check, code);

	/* Restore registers */
	for (i = X86_EAX; i <= X86_EDI; ++i) {
		if (i == X86_ESP || i == X86_EBP)
			continue;
		if (i == X86_EAX && tramp_type != MONO_TRAMPOLINE_AOT_PLT)
			continue;
		x86_mov_reg_membase (code, i, X86_EBP, regarray_offset + (i * 4), 4);
	}

	/* Restore frame */
	x86_leave (code);
	cfa_offset -= sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
	mono_add_unwind_op_same_value (unwind_ops, code, buf, X86_EBP);

	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/* Load the value returned by the trampoline */
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, 0, 4);
		/* The trampoline returns normally, pop the trampoline argument */
		x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
		cfa_offset -= sizeof (target_mgreg_t);
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
		x86_ret (code);
	} else {
		/* NOTE(review): the non-return path also emits a plain ret here;
		 * presumably the ret address slot was rewritten to the jump
		 * target (see the arg_offset overwrite above) -- confirm. */
		x86_ret (code);
	}

	g_assert ((code - buf) <= 256);

	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));

	tramp_name = mono_get_generic_trampoline_name (tramp_type);
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);

	return buf;
}
/*
 * dbi_init - bring up the DBI display path and initialize the LCM panel.
 *
 * @fbVA:        framebuffer virtual address (unused in this function)
 * @fbPA:        framebuffer physical address, programmed into OVL/LCD layer
 * @isLcmInited: TRUE if the LCM was already initialized (e.g. by the bootloader);
 *               skips the panel init callback in that case
 *
 * Returns DISP_STATUS_OK on success, DISP_STATUS_NOT_IMPLEMENTED if no DBI
 * driver context is available.
 *
 * NOTE: the register/path configuration order below is hardware-mandated;
 * do not reorder the calls.
 */
static DISP_STATUS dbi_init(UINT32 fbVA, UINT32 fbPA, BOOL isLcmInited)
{
	if (!disp_drv_dbi_init_context())
		return DISP_STATUS_NOT_IMPLEMENTED;

#ifdef MT65XX_NEW_DISP
	{
		struct disp_path_config_struct config = {0};

		/* OVL reads the framebuffer from memory and feeds the DBI path. */
		config.srcModule = DISP_MODULE_OVL;
		config.bgROI.x = 0;
		config.bgROI.y = 0;
		config.bgROI.width = DISP_GetScreenWidth();
		config.bgROI.height = DISP_GetScreenHeight();
		config.bgColor = 0x0;	/* background color */
		config.pitch = DISP_GetScreenWidth()*2;
		config.srcROI.x = 0;
		config.srcROI.y = 0;
		config.srcROI.height= DISP_GetScreenHeight();
		config.srcROI.width= DISP_GetScreenWidth();
		config.ovl_config.source = OVL_LAYER_SOURCE_MEM;

		/* Config FB_Layer port to be virtual (goes through the M4U). */
#if 1 // defined(MTK_M4U_SUPPORT)
		{
			M4U_PORT_STRUCT portStruct;

			portStruct.ePortID = M4U_PORT_LCD_OVL;	/* hardware port ID, defined in M4U_PORT_ID_ENUM */
			portStruct.Virtuality = 1;
			portStruct.Security = 0;
			portStruct.domain = 3;			/* domain : 0 1 2 3 */
			portStruct.Distance = 1;
			portStruct.Direction = 0;
			m4u_config_port(&portStruct);
		}
#endif
		/* Reconfig FB_Layer and enable it. */
		config.ovl_config.layer = FB_LAYER;
		config.ovl_config.layer_en = 1;
		config.ovl_config.fmt = OVL_INPUT_FORMAT_RGB565;
		config.ovl_config.addr = fbPA;
		config.ovl_config.source = OVL_LAYER_SOURCE_MEM;
		config.ovl_config.src_x = 0;
		config.ovl_config.src_y = 0;
		config.ovl_config.src_w = DISP_GetScreenWidth();
		config.ovl_config.src_h = DISP_GetScreenHeight();
		config.ovl_config.dst_x = 0;	/* ROI */
		config.ovl_config.dst_y = 0;
		config.ovl_config.dst_w = DISP_GetScreenWidth();
		config.ovl_config.dst_h = DISP_GetScreenHeight();
		/* NOTE(review): value is width aligned to 32 px * 2 (RGB565), i.e.
		 * presumably a pitch in BYTES, not "pixel number" — confirm against
		 * the OVL register spec. */
		config.ovl_config.src_pitch = ALIGN_TO(DISP_GetScreenWidth(),32)*2;
		config.ovl_config.keyEn = 0;
		config.ovl_config.key = 0xFF;	/* color key */
		config.ovl_config.aen = 0;	/* alpha enable */
		config.ovl_config.alpha = 0;

		/* Mirror the same layer setup into the LCD controller. */
		LCD_LayerSetAddress(FB_LAYER, fbPA);
		LCD_LayerSetFormat(FB_LAYER, LCD_LAYER_FORMAT_RGB565);
		LCD_LayerSetOffset(FB_LAYER, 0, 0);
		LCD_LayerSetSize(FB_LAYER,DISP_GetScreenWidth(),DISP_GetScreenHeight());
		LCD_LayerSetPitch(FB_LAYER, ALIGN_TO(DISP_GetScreenWidth(),32) * 2);
		LCD_LayerEnable(FB_LAYER, TRUE);

		config.dstModule = DISP_MODULE_DBI;	/* DISP_MODULE_WDMA1 */
		config.outFormat = RDMA_OUTPUT_FORMAT_ARGB;
		disp_path_config(&config);
		disp_bls_config();
	}
#endif

	init_io_pad();
	init_io_driving_current();

	init_lcd(isLcmInited);

	/* Only run the panel's own init sequence if the bootloader didn't. */
	if (NULL != lcm_drv->init && !isLcmInited)
	{
		lcm_drv->init();
	}

	init_lcd_te_control();

	return DISP_STATUS_OK;
}
/*
 * mono_arch_create_sdb_trampoline:
 *
 *   Return a trampoline which captures the current context, passes it to
 * mini_get_dbg_callbacks ()->single_step_from_context ()/mini_get_dbg_callbacks ()->breakpoint_from_context (),
 * then restores the (potentially changed) context.
 *
 * SINGLE_STEP selects which debugger callback the trampoline invokes.
 * INFO receives the MonoTrampInfo (name, code range, unwind ops).
 * AOT mode emits a breakpoint instead of a direct call, since the callback
 * address is not known at AOT compile time.
 */
guint8*
mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
{
	int tramp_size = 256;
	int framesize, ctx_offset, cfa_offset;
	guint8 *code, *buf;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;

	code = buf = mono_global_codeman_reserve (tramp_size);

	/* Frame layout: one argument slot (the ctx pointer), then a MonoContext. */
	framesize = 0;

	/* Argument area */
	framesize += sizeof (target_mgreg_t);

	framesize = ALIGN_TO (framesize, 8);
	ctx_offset = framesize;
	framesize += sizeof (MonoContext);

	framesize = ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT);

	// CFA = sp + 4
	cfa_offset = 4;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, 4);
	// IP saved at CFA - 4
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_NREG, -cfa_offset);

	/* Standard prologue: push ebp; ebp = esp; reserve the frame. */
	x86_push_reg (code, X86_EBP);
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_EBP, - cfa_offset);

	x86_mov_reg_reg (code, X86_EBP, X86_ESP);
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, X86_EBP);
	/* The + 8 makes the stack aligned */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, framesize + 8);

	/* Initialize a MonoContext structure on the stack */
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eax), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebx), X86_EBX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ecx), X86_ECX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edx), X86_EDX, sizeof (target_mgreg_t));
	/* Caller's ebp was pushed by the prologue and sits at [ebp + 0]. */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, 0, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebp), X86_EAX, sizeof (target_mgreg_t));
	/* Reconstruct the caller's esp as ebp + cfa_offset. */
	x86_mov_reg_reg (code, X86_EAX, X86_EBP);
	x86_alu_reg_imm (code, X86_ADD, X86_EAX, cfa_offset);
	/*
	 * FIX: store the reconstructed caller esp (EAX) into ctx->esp.
	 * The previous code stored X86_ESP (the trampoline's own stack pointer),
	 * which left the two instructions above dead and recorded the wrong esp
	 * in the captured context.
	 */
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, esp), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, esi), X86_ESI, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edi), X86_EDI, sizeof (target_mgreg_t));
	/* Return address is at [ebp + 4]; record it as ctx->eip. */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, 4, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eip), X86_EAX, sizeof (target_mgreg_t));

	/* Call the single step/breakpoint function in sdb */
	x86_lea_membase (code, X86_EAX, X86_ESP, ctx_offset);
	x86_mov_membase_reg (code, X86_ESP, 0, X86_EAX, sizeof (target_mgreg_t));

	if (aot) {
		x86_breakpoint (code);
	} else {
		if (single_step)
			x86_call_code (code, mini_get_dbg_callbacks ()->single_step_from_context);
		else
			x86_call_code (code, mini_get_dbg_callbacks ()->breakpoint_from_context);
	}

	/* Restore registers from ctx (the debugger may have modified it). */
	/* Overwrite the saved ebp */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebp), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, 0, X86_EAX, sizeof (target_mgreg_t));
	/* Overwrite saved eip */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eip), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, 4, X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eax), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EBX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebx), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_ECX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ecx), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EDX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edx), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_ESI, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, esi), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EDI, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edi), sizeof (target_mgreg_t));

	/* Epilogue: tear down the frame and return to the (possibly updated) eip. */
	x86_leave (code);
	cfa_offset -= sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
	x86_ret (code);

	mono_arch_flush_icache (code, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
	g_assert (code - buf <= tramp_size);

	const char *tramp_name = single_step ? "sdb_single_step_trampoline" : "sdb_breakpoint_trampoline";
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);

	return buf;
}
/*
 * dsi_init - bring up the DSI display path (command or video mode) and the LCM.
 *
 * @fbVA:        framebuffer virtual address (unused in this function)
 * @fbPA:        framebuffer physical address, programmed into OVL/LCD layer
 * @isLcmInited: TRUE if the panel was already initialized by the bootloader
 *
 * Returns DISP_STATUS_OK, or DISP_STATUS_NOT_IMPLEMENTED if no DSI driver
 * context exists. The call order is hardware-mandated; do not reorder.
 */
static DISP_STATUS dsi_init(UINT32 fbVA, UINT32 fbPA, BOOL isLcmInited)
{
	if (!disp_drv_dsi_init_context())
		return DISP_STATUS_NOT_IMPLEMENTED;

	if(lcm_params->dsi.mode == CMD_MODE)
	{
#ifndef MT65XX_NEW_DISP
		init_lcd();
#endif
		init_dsi(isLcmInited);
		/* Run the panel init sequence only if the bootloader didn't. */
		if (NULL != lcm_drv->init && !isLcmInited)
		{
			lcm_drv->init();
			DSI_LP_Reset();
		}
#ifndef MT65XX_NEW_DISP
		DSI_clk_HS_mode(0);
#else
		DSI_clk_HS_mode(1);
#endif
		DSI_SetMode(lcm_params->dsi.mode);
#ifndef MT65XX_NEW_DISP
		DPI_PowerOn();
		DPI_PowerOff();
		init_lcd_te_control();
#endif
	}
	else
	{
		/* Video (or burst) mode path. */
#ifndef MT65XX_NEW_DISP
		init_intermediate_buffers(fbPA);
		init_lcd();
		init_dpi(isLcmInited);
#endif
		if (!isLcmInited) {
			/* Drop to command mode and stop the clock before panel init. */
			DSI_SetMode(0);
			mdelay(100);
			DSI_DisableClk();
		} else {
			is_video_mode_running = true;
		}
		init_dsi(isLcmInited);
		if (NULL != lcm_drv->init && !isLcmInited)
		{
			lcm_drv->init();
			DSI_LP_Reset();
		}
		DSI_SetMode(lcm_params->dsi.mode);
#ifndef BUILD_UBOOT
#ifndef MT65XX_NEW_DISP
		if(lcm_params->dsi.lcm_ext_te_monitor)
		{
			is_video_mode_running = false;
			LCD_TE_SetMode(LCD_TE_MODE_VSYNC_ONLY);
			LCD_TE_SetEdgePolarity(LCM_POLARITY_RISING);
			LCD_TE_Enable(FALSE);
		}
		if(lcm_params->dsi.noncont_clock)
			DSI_set_noncont_clk(false, lcm_params->dsi.noncont_clock_period);
		if(lcm_params->dsi.lcm_int_te_monitor)
			DSI_set_int_TE(false, lcm_params->dsi.lcm_int_te_period);
#endif
#endif
	}

#ifdef MT65XX_NEW_DISP
	{
		struct disp_path_config_struct config = {0};

		config.srcModule = DISP_MODULE_OVL;
		config.bgROI.x = 0;
		config.bgROI.y = 0;
		config.bgROI.width = DISP_GetScreenWidth();
		config.bgROI.height = DISP_GetScreenHeight();
		config.bgColor = 0x0;	/* background color */
		config.pitch = DISP_GetScreenWidth()*2;
		config.srcROI.x = 0;
		config.srcROI.y = 0;
		config.srcROI.height= DISP_GetScreenHeight();
		config.srcROI.width= DISP_GetScreenWidth();
		config.ovl_config.source = OVL_LAYER_SOURCE_MEM;

		/* In video mode, disable the FB layer under mutex before
		 * reprogramming it below. */
		if(lcm_params->dsi.mode != CMD_MODE)
		{
			config.ovl_config.layer = FB_LAYER;
			config.ovl_config.layer_en = 0;
			disp_path_get_mutex();
			disp_path_config_layer(&config.ovl_config);
			disp_path_release_mutex();
			disp_path_wait_reg_update();
		}

		/* Config FB_Layer port to go through the M4U. */
		{
			M4U_PORT_STRUCT portStruct;

			portStruct.ePortID = M4U_PORT_OVL_CH3;	/* hardware port ID, defined in M4U_PORT_ID_ENUM */
			portStruct.Virtuality = 1;
			portStruct.Security = 0;
			portStruct.domain = 3;			/* domain : 0 1 2 3 */
			portStruct.Distance = 1;
			portStruct.Direction = 0;
			m4u_config_port(&portStruct);
		}

		/* Reconfig FB_Layer and enable it. */
		config.ovl_config.layer = FB_LAYER;
		config.ovl_config.layer_en = 1;
		config.ovl_config.fmt = OVL_INPUT_FORMAT_RGB565;
		config.ovl_config.addr = fbPA;
		config.ovl_config.source = OVL_LAYER_SOURCE_MEM;
		config.ovl_config.src_x = 0;
		config.ovl_config.src_y = 0;
		config.ovl_config.dst_x = 0;	/* ROI */
		config.ovl_config.dst_y = 0;
		config.ovl_config.dst_w = DISP_GetScreenWidth();
		config.ovl_config.dst_h = DISP_GetScreenHeight();
		config.ovl_config.src_pitch = ALIGN_TO(DISP_GetScreenWidth(),32)*2;
		config.ovl_config.keyEn = 0;
		config.ovl_config.key = 0xFF;	/* color key */
		config.ovl_config.aen = 0;	/* alpha enable */
		config.ovl_config.alpha = 0;

		LCD_LayerSetAddress(FB_LAYER, fbPA);
		LCD_LayerSetFormat(FB_LAYER, LCD_LAYER_FORMAT_RGB565);
		LCD_LayerSetOffset(FB_LAYER, 0, 0);
		LCD_LayerSetSize(FB_LAYER,DISP_GetScreenWidth(),DISP_GetScreenHeight());
		LCD_LayerSetPitch(FB_LAYER, ALIGN_TO(DISP_GetScreenWidth(),32) * 2);
		LCD_LayerEnable(FB_LAYER, TRUE);

		if(lcm_params->dsi.mode == CMD_MODE)
			config.dstModule = DISP_MODULE_DSI_CMD;	/* DISP_MODULE_WDMA1 */
		else
			config.dstModule = DISP_MODULE_DSI_VDO;	/* DISP_MODULE_WDMA1 */
		config.outFormat = RDMA_OUTPUT_FORMAT_ARGB;

		/* Video mode reprograms a live path, so take the mutex around it. */
		if(lcm_params->dsi.mode != CMD_MODE)
			disp_path_get_mutex();
		disp_path_config(&config);
		if(lcm_params->dsi.mode != CMD_MODE)
			disp_path_release_mutex();

		/* Disable LK UI layer (Layer2) */
		if(lcm_params->dsi.mode != CMD_MODE)
		{
			config.ovl_config.layer = FB_LAYER-1;
			config.ovl_config.layer_en = 0;
			disp_path_get_mutex();
			disp_path_config_layer(&config.ovl_config);
			disp_path_release_mutex();
			disp_path_wait_reg_update();
		}

		/* Config LK UI layer port to go through the M4U as well. */
		{
			M4U_PORT_STRUCT portStruct;

			portStruct.ePortID = M4U_PORT_OVL_CH2;	/* hardware port ID, defined in M4U_PORT_ID_ENUM */
			portStruct.Virtuality = 1;
			portStruct.Security = 0;
			portStruct.domain = 3;			/* domain : 0 1 2 3 */
			portStruct.Distance = 1;
			portStruct.Direction = 0;
			m4u_config_port(&portStruct);
		}
	}
#endif
	return DISP_STATUS_OK;
}
/*
 * mono_arch_get_gsharedvt_call_info:
 *
 * Build the GSharedVtCallInfo describing how to marshal a call between
 * normal and gsharedvt (generic-shared-by-value-type) calling conventions:
 * a slot-by-slot map from the caller's argument locations to the callee's,
 * plus return-value marshalling info.
 *
 * ADDR is the callee address; NORMAL_SIG/GSHAREDVT_SIG are the two views of
 * the signature; GSHAREDVT_IN selects the transition direction; VCALL_OFFSET
 * and CALLI are stored verbatim for the trampoline.
 *
 * Returns a mono_domain_alloc0'd GSharedVtCallInfo with a trailing
 * variable-length map of (src, dst) int pairs. The temporary CallInfo
 * structures are freed before returning.
 */
gpointer
mono_arch_get_gsharedvt_call_info (gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, gboolean gsharedvt_in, gint32 vcall_offset, gboolean calli)
{
	GSharedVtCallInfo *info;
	CallInfo *caller_cinfo, *callee_cinfo;
	MonoMethodSignature *caller_sig, *callee_sig;
	int aindex, i;
	gboolean var_ret = FALSE;
	CallInfo *cinfo, *gcinfo;
	MonoMethodSignature *sig, *gsig;
	GPtrArray *map;

	if (gsharedvt_in) {
		caller_sig = normal_sig;
		callee_sig = gsharedvt_sig;
		caller_cinfo = mono_arch_get_call_info (NULL, caller_sig);
		callee_cinfo = mono_arch_get_call_info (NULL, callee_sig);
	} else {
		callee_sig = normal_sig;
		caller_sig = gsharedvt_sig;
		callee_cinfo = mono_arch_get_call_info (NULL, callee_sig);
		caller_cinfo = mono_arch_get_call_info (NULL, caller_sig);
	}

	/*
	 * If GSHAREDVT_IN is true, this means we are transitioning from normal to gsharedvt code. The caller uses the
	 * normal call signature, while the callee uses the gsharedvt signature.
	 * If GSHAREDVT_IN is false, its the other way around.
	 */

	/* sig/cinfo describes the normal call, while gsig/gcinfo describes the gsharedvt call */
	if (gsharedvt_in) {
		sig = caller_sig;
		gsig = callee_sig;
		cinfo = caller_cinfo;
		gcinfo = callee_cinfo;
	} else {
		sig = callee_sig;
		gsig = caller_sig;
		cinfo = callee_cinfo;
		gcinfo = caller_cinfo;
	}

	DEBUG_AMD64_GSHAREDVT_PRINT ("source sig: (%s) return (%s)\n", mono_signature_get_desc (caller_sig, FALSE), mono_type_full_name (mono_signature_get_return_type (caller_sig))); // Leak
	DEBUG_AMD64_GSHAREDVT_PRINT ("dest sig: (%s) return (%s)\n", mono_signature_get_desc (callee_sig, FALSE), mono_type_full_name (mono_signature_get_return_type (callee_sig)));

	if (gcinfo->ret.storage == ArgGsharedvtVariableInReg) {
		/*
		 * The return type is gsharedvt
		 */
		var_ret = TRUE;
	}

	/*
	 * The stack looks like this:
	 * <arguments>
	 * <trampoline frame>
	 * <call area>
	 * We have to map the stack slots in <arguments> to the stack slots in <call area>.
	 */
	map = g_ptr_array_new ();

	/* Build the per-argument (src slot, dst slot) map. */
	for (aindex = 0; aindex < cinfo->nargs; ++aindex) {
		ArgInfo *src_info = &caller_cinfo->args [aindex];
		ArgInfo *dst_info = &callee_cinfo->args [aindex];
		int *src = NULL, *dst = NULL;
		int nsrc = -1, ndst = -1, nslots = 0;
		int arg_marshal = GSHAREDVT_ARG_NONE;
		int arg_slots = 0; // Size in quadwords

		DEBUG_AMD64_GSHAREDVT_PRINT ("-- arg %d in (%s) out (%s)\n", aindex, arg_info_desc (src_info), arg_info_desc (dst_info));

		/* Where does the caller place this argument? */
		switch (src_info->storage) {
		case ArgInIReg:
		case ArgInDoubleSSEReg:
		case ArgInFloatSSEReg:
		case ArgValuetypeInReg:
		case ArgOnStack:
			nsrc = get_arg_slots (src_info, &src, TRUE);
			break;
		case ArgGSharedVtInReg:
			handle_marshal_when_src_gsharedvt (dst_info, &arg_marshal, &arg_slots);
			handle_map_when_gsharedvt_in_reg (src_info, &nsrc, &src);
			break;
		case ArgGSharedVtOnStack:
			handle_marshal_when_src_gsharedvt (dst_info, &arg_marshal, &arg_slots);
			handle_map_when_gsharedvt_on_stack (src_info, &nsrc, &src, TRUE);
			break;
		case ArgValuetypeAddrInIReg:
		case ArgValuetypeAddrOnStack:
			nsrc = get_arg_slots (src_info, &src, TRUE);
			break;
		default:
			g_error ("Gsharedvt can't handle source arg type %d", (int)src_info->storage); // Inappropriate value: ArgValuetypeAddrInIReg is for returns only
		}

		/* Where does the callee expect it? */
		switch (dst_info->storage) {
		case ArgInIReg:
		case ArgInDoubleSSEReg:
		case ArgInFloatSSEReg:
		case ArgOnStack:
		case ArgValuetypeInReg:
			ndst = get_arg_slots (dst_info, &dst, FALSE);
			break;
		case ArgGSharedVtInReg:
			handle_marshal_when_dst_gsharedvt (src_info, &arg_marshal);
			handle_map_when_gsharedvt_in_reg (dst_info, &ndst, &dst);
			break;
		case ArgGSharedVtOnStack:
			handle_marshal_when_dst_gsharedvt (src_info, &arg_marshal);
			handle_map_when_gsharedvt_on_stack (dst_info, &ndst, &dst, FALSE);
			break;
		case ArgValuetypeAddrInIReg:
		case ArgValuetypeAddrOnStack:
			ndst = get_arg_slots (dst_info, &dst, FALSE);
			break;
		default:
			g_error ("Gsharedvt can't handle dest arg type %d", (int)dst_info->storage); // See above
		}

		if (arg_marshal == GSHAREDVT_ARG_BYREF_TO_BYVAL && dst_info->byte_arg_size) {
			/* Have to load less than 4 bytes */
			// FIXME: Signed types
			switch (dst_info->byte_arg_size) {
			case 1:
				arg_marshal = GSHAREDVT_ARG_BYREF_TO_BYVAL_U1;
				break;
			case 2:
				arg_marshal = GSHAREDVT_ARG_BYREF_TO_BYVAL_U2;
				break;
			default:
				arg_marshal = GSHAREDVT_ARG_BYREF_TO_BYVAL_U4;
				break;
			}
		}

		/* Encode the marshal kind and slot count into the first src slot. */
		if (nsrc)
			src [0] |= (arg_marshal << SRC_DESCRIPTOR_MARSHAL_SHIFT) | (arg_slots << SLOT_COUNT_SHIFT);

		/* Merge and add to the global list*/
		nslots = MIN (nsrc, ndst);
		DEBUG_AMD64_GSHAREDVT_PRINT ("nsrc %d ndst %d\n", nsrc, ndst);

		for (i = 0; i < nslots; ++i)
			add_to_map (map, src [i], dst [i]);

		g_free (src);
		g_free (dst);
	}

	DEBUG_AMD64_GSHAREDVT_PRINT ("-- return in (%s) out (%s) var_ret %d\n", arg_info_desc (&caller_cinfo->ret), arg_info_desc (&callee_cinfo->ret), var_ret);

	if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
		/* Both the caller and the callee pass the vtype ret address in r8 (System V) and RCX or RDX (Windows) */
		g_assert (gcinfo->ret.storage == ArgValuetypeAddrInIReg || gcinfo->ret.storage == ArgGsharedvtVariableInReg);
		add_to_map (map, map_reg (cinfo->ret.reg), map_reg (cinfo->ret.reg));
	}

	/* Allocate the info with the variable-length map appended. */
	info = mono_domain_alloc0 (mono_domain_get (), sizeof (GSharedVtCallInfo) + (map->len * sizeof (int)));
	info->addr = addr;
	info->stack_usage = callee_cinfo->stack_usage;
	info->ret_marshal = GSHAREDVT_RET_NONE;
	info->gsharedvt_in = gsharedvt_in ? 1 : 0;
	info->vret_slot = -1;
	info->calli = calli;

	if (var_ret) {
		g_assert (gcinfo->ret.storage == ArgGsharedvtVariableInReg);
		info->vret_arg_reg = map_reg (gcinfo->ret.reg);
		DEBUG_AMD64_GSHAREDVT_PRINT ("mapping vreg_arg_reg to %d in reg %s\n", info->vret_arg_reg, mono_arch_regname (gcinfo->ret.reg));
	} else {
		info->vret_arg_reg = -1;
	}

#ifdef DEBUG_AMD64_GSHAREDVT
	printf ("final map:\n");
	for (i = 0; i < map->len; i += 2) {
		printf ("\t[%d] src %x dst %x\n ", i / 2, GPOINTER_TO_UINT (g_ptr_array_index (map, i)), GPOINTER_TO_UINT (g_ptr_array_index (map, i + 1)));
	}
#endif

	info->vcall_offset = vcall_offset;
	info->map_count = map->len / 2;
	for (i = 0; i < map->len; ++i)
		info->map [i] = GPOINTER_TO_UINT (g_ptr_array_index (map, i));
	g_ptr_array_free (map, TRUE);

	/* Compute return value marshalling */
	if (var_ret) {
		/* Compute return value marshalling */
		switch (cinfo->ret.storage) {
		case ArgInIReg:
			if (!gsharedvt_in || sig->ret->byref) {
				info->ret_marshal = GSHAREDVT_RET_IREGS_1;
			} else {
				MonoType *ret = sig->ret;

				ret = mini_type_get_underlying_type (ret);
				switch (ret->type) {
				case MONO_TYPE_I1:
					info->ret_marshal = GSHAREDVT_RET_I1;
					break;
				case MONO_TYPE_U1:
					info->ret_marshal = GSHAREDVT_RET_U1;
					break;
				case MONO_TYPE_I2:
					info->ret_marshal = GSHAREDVT_RET_I2;
					break;
				case MONO_TYPE_U2:
					info->ret_marshal = GSHAREDVT_RET_U2;
					break;
				case MONO_TYPE_I4:
					info->ret_marshal = GSHAREDVT_RET_I4;
					break;
				case MONO_TYPE_U4:
					info->ret_marshal = GSHAREDVT_RET_U4;
					break;
				case MONO_TYPE_I:
				case MONO_TYPE_U:
				case MONO_TYPE_PTR:
				case MONO_TYPE_FNPTR:
				case MONO_TYPE_OBJECT:
				case MONO_TYPE_U8:
				case MONO_TYPE_I8:
					info->ret_marshal = GSHAREDVT_RET_I8;
					break;
				case MONO_TYPE_GENERICINST:
					g_assert (!mono_type_generic_inst_is_valuetype (ret));
					info->ret_marshal = GSHAREDVT_RET_I8;
					break;
				default:
					g_error ("Gsharedvt can't handle dst type [%d]", (int)sig->ret->type);
				}
			}
			break;
		case ArgValuetypeInReg:
			info->ret_marshal = GSHAREDVT_RET_IREGS_1 - 1 + cinfo->ret.nregs;
			g_assert (cinfo->ret.nregs == 1); // ABI supports 2-register return but we do not implement this.
			break;
		case ArgInDoubleSSEReg:
		case ArgInFloatSSEReg:
			info->ret_marshal = GSHAREDVT_RET_R8;
			break;
		case ArgValuetypeAddrInIReg:
			break;
		default:
			g_error ("Can't marshal return of storage [%d] %s", (int)cinfo->ret.storage, storage_name (cinfo->ret.storage));
		}

		if (gsharedvt_in && cinfo->ret.storage != ArgValuetypeAddrInIReg) {
			/* Allocate stack space for the return value */
			info->vret_slot = map_stack_slot (info->stack_usage / sizeof (gpointer));
			info->stack_usage += mono_type_stack_size_internal (normal_sig->ret, NULL, FALSE) + sizeof (gpointer);
		}
		DEBUG_AMD64_GSHAREDVT_PRINT ("RET marshal is %s\n", ret_marshal_name [info->ret_marshal]);
	}

	info->stack_usage = ALIGN_TO (info->stack_usage, MONO_ARCH_FRAME_ALIGNMENT);

	g_free (callee_cinfo);
	g_free (caller_cinfo);

	DEBUG_AMD64_GSHAREDVT_PRINT ("allocated an info at %p stack usage %d\n", info, info->stack_usage);
	return info;
}
/*
 * disp_helper_get_option - query a display bring-up/feature option.
 *
 * @option: which option to query.
 *
 * Returns the option value: for the stage-dependent options a 0/1 flag
 * decided by the current bring-up stage, otherwise the cached value from
 * _disp_helper_option_value[].
 *
 * Fixes over the previous version:
 * - removed the trailing valueless `return;` (constraint violation in an
 *   `int` function);
 * - added explicit `return 0;` after each BUG_ON(1) so no case can fall
 *   through into the next one (BUG_ON panics, so these are unreachable);
 * - merged the three identical stage-dependent cases.
 */
int disp_helper_get_option(DISP_HELPER_OPTION option)
{
	/* DISPMSG("stage=0x%08x\n", disp_global_stage); */
	switch (option) {
	case DISP_HELPER_OPTION_USE_CMDQ:
	case DISP_HELPER_OPTION_USE_M4U:
	case DISP_HELPER_OPTION_USE_CLKMGR:
		/* Enabled only on normal stage; off for bring-up/early porting. */
		if (_is_normal_stage())
			return 1;
		if (_is_bringup_stage() || _is_early_porting_stage())
			return 0;
		BUG_ON(1);
		return 0;	/* unreachable: BUG_ON() panics */
	case DISP_HELPER_OPTION_MIPITX_ON_CHIP:
		/* On-chip MIPI TX is used except during early porting. */
		if (_is_normal_stage() || _is_bringup_stage())
			return 1;
		if (_is_early_porting_stage())
			return 0;
		BUG_ON(1);
		return 0;	/* unreachable: BUG_ON() panics */
	case DISP_HELPER_OPTION_FAKE_LCM_X:
	{
		int x = 0;
#ifdef CONFIG_CUSTOM_LCM_X
		/* Best effort: x stays 0 if the config string doesn't parse. */
		kstrtoint(CONFIG_CUSTOM_LCM_X, 0, &x);
#endif
		return x;
	}
	case DISP_HELPER_OPTION_FAKE_LCM_Y:
	{
		int y = 0;
#ifdef CONFIG_CUSTOM_LCM_Y
		kstrtoint(CONFIG_CUSTOM_LCM_Y, 0, &y);
#endif
		return y;
	}
	case DISP_HELPER_OPTION_FAKE_LCM_WIDTH:
	{
		int x = 0;
		int w = DISP_GetScreenWidth();

#ifdef CONFIG_CUSTOM_LCM_X
		/* With a fake LCM x-offset, the width must be 16-aligned. */
		kstrtoint(CONFIG_CUSTOM_LCM_X, 0, &x);
		if (x != 0)
			w = ALIGN_TO(w, 16);
#endif
		return w;
	}
	case DISP_HELPER_OPTION_FAKE_LCM_HEIGHT:
		return DISP_GetScreenHeight();
	default:
		return _disp_helper_option_value[option];
	}
}
/*
 * show_logo - decompress logo #index from the logo database and blit it to
 * the framebuffer, honoring MTK_LCM_PHYSICAL_ROTATION (0/90/180/270).
 *
 * Database layout (assumed from the index arithmetic — TODO confirm against
 * the logo packing tool): pinfo[0] = number of logos, pinfo[1] = total db
 * size, pinfo[2..2+logonum-1] = byte offset of each compressed logo.
 *
 * Fixes over the previous version:
 * - the last logo's length is now computed against the total size pinfo[1];
 *   the old `if (index < logonum)` was always true after the ASSERT, so the
 *   last logo read a nonexistent "next offset";
 * - the 180-degree branch now compares all 3 chars of "180" (the old
 *   2-char strncmp also matched e.g. "18x");
 * - the 180-degree mirror reads s[width-1-k]; the old s[width-k] read one
 *   element past the row for k == 0.
 */
static void show_logo(UINT32 index)
{
	UINT32 logonum;
	UINT32 logolen;
	UINT32 inaddr;
	void *fb_addr = mt_get_fb_addr();
	void *fb_tempaddr = mt_get_tempfb_addr();
	UINT32 fb_size = mt_get_fb_size();
	void *db_addr = mt_get_logo_db_addr();
	unsigned int *pinfo = (unsigned int*)db_addr;

	logonum = pinfo[0];
	ASSERT(index < logonum);

	/* Length of logo i is offset[i+1] - offset[i]; the last logo ends at
	 * the total db size pinfo[1]. */
	if (index < logonum - 1)
		logolen = pinfo[3+index] - pinfo[2+index];
	else
		logolen = pinfo[1] - pinfo[2+index];
	inaddr = (unsigned int)db_addr + pinfo[2+index];

	printf("show_logo, in_addr=0x%08x, fb_addr=0x%08x, logolen=%d, ticks=%d\n", inaddr, fb_addr, logolen, get_ticks());
	// mt_logo_decompress((void*)inaddr, (void*)fb_addr + 2 * fb_size, logolen, fb_size);
#if 1
	{
		unsigned short *d;
		int j,k;

		if(0 == strncmp(MTK_LCM_PHYSICAL_ROTATION, "270", 3))
		{
			unsigned int l;
			unsigned short *s;
			unsigned int width = CFG_DISPLAY_WIDTH;
			unsigned int height = CFG_DISPLAY_HEIGHT;

			/* Decompress into the temp buffer, then rotate into the fb. */
			mt_logo_decompress((void*)inaddr, fb_tempaddr, logolen, fb_size);
			s = fb_tempaddr;
			for (j=0; j<width; j++) {
				for (k=0, l=height-1; k<height; k++, l--) {
					d = fb_addr + ((ALIGN_TO(width, 32) * l + j) << 1);
					*d = *s++;
				}
			}
		}
		else if(0 == strncmp(MTK_LCM_PHYSICAL_ROTATION, "90", 2))
		{
			unsigned int l;
			unsigned short *s;
			unsigned int width = CFG_DISPLAY_WIDTH;
			unsigned int height = CFG_DISPLAY_HEIGHT;

			mt_logo_decompress((void*)inaddr, fb_tempaddr, logolen, fb_size);
			s = fb_tempaddr;
			for (j=width - 1; j>=0; j--) {
				for (k=0, l=0; k<height; k++, l++) {
					d = fb_addr + ((ALIGN_TO(width, 32) * l + j) << 1);
					*d = *s++;
				}
			}
		}
		else if(0 == strncmp(MTK_LCM_PHYSICAL_ROTATION, "180", 3))
		{
			unsigned short *s;
			unsigned short *d;
			unsigned int width = CFG_DISPLAY_WIDTH;
			unsigned int height = CFG_DISPLAY_HEIGHT;

			mt_logo_decompress((void*)inaddr, fb_tempaddr, logolen, fb_size);
			/* fb_tempaddr is void*, so this is a byte offset to the start
			 * of the last row (2 bytes per RGB565 pixel). */
			s = fb_tempaddr + (2 * ((height - 1) * width));
			d = fb_addr;
			/* Walk the source rows bottom-up, mirroring each row. */
			for (j=0; j < height; j++) {
				for (k=0; k < width; k++) {
					*(d + k) = *(s + (width - 1 - k));
				}
				d += ALIGN_TO(width, 32);
				s -= width;
			}
		}
		else
#endif
		{
			if(0 != CFG_DISPLAY_WIDTH % 32) {
				/* Width not pitch-aligned: copy row by row into the
				 * 32-pixel-aligned framebuffer pitch. */
				unsigned short *s;
				unsigned short *d;
				unsigned int width = CFG_DISPLAY_WIDTH;
				unsigned int height = CFG_DISPLAY_HEIGHT;

				mt_logo_decompress((void*)inaddr, fb_tempaddr, logolen, fb_size);
				s = fb_tempaddr;
				d = fb_addr;
				for (j=0; j < height; j++) {
					memcpy(d, s, width * 2);
					d += ALIGN_TO(width, 32);
					s += width;
				}
			} else {
				/* Width already pitch-aligned: decompress straight to fb. */
				mt_logo_decompress((void*)inaddr, (void*)fb_addr, logolen, fb_size);
			}
		}
	}
	printf("ticks=%d\n", get_ticks());
}
/** Function nano_malloc
  * Algorithm:
  *   Walk through the free list to find the first chunk large enough. If
  *   none is found, call sbrk to allocate a new chunk from the system.
  *
  * Returns a MALLOC_ALIGN-aligned pointer to at least `s` usable bytes, or
  * NULL with RERRNO = ENOMEM on overflow or allocation failure.
  *
  * Fix over the previous version: the final assert referenced an undeclared
  * identifier `size`; the requested size parameter is `s`, so the assert
  * failed to compile whenever NDEBUG was not defined.
  */
void * nano_malloc(RARG malloc_size_t s)
{
    chunk *p, *r;
    char * ptr, * align_ptr;
    int offset;

    malloc_size_t alloc_size;

    /* Total chunk size: aligned payload + padding + chunk header,
     * clamped up to the minimum chunk size. */
    alloc_size = ALIGN_TO(s, CHUNK_ALIGN); /* size of aligned data load */
    alloc_size += MALLOC_PADDING; /* padding */
    alloc_size += CHUNK_OFFSET; /* size of chunk head */
    alloc_size = MAX(alloc_size, MALLOC_MINCHUNK);

    /* `alloc_size < s` catches wraparound of the unsigned additions above. */
    if (alloc_size >= MAX_ALLOC_SIZE || alloc_size < s)
    {
        RERRNO = ENOMEM;
        return NULL;
    }

    MALLOC_LOCK;

    /* First-fit scan: p trails r so r can be unlinked. */
    p = free_list;
    r = p;

    while (r)
    {
        int rem = r->size - alloc_size;
        if (rem >= 0)
        {
            if (rem >= MALLOC_MINCHUNK)
            {
                /* Found a chunk much larger than the required size: break
                 * it into two chunks and return the second (tail) one. */
                r->size = rem;
                r = (chunk *)((char *)r + rem);
                r->size = alloc_size;
            }
            /* Found a chunk that is exactly the size or slightly bigger
             * than requested size: just return this chunk. */
            else if (p == r)
            {
                /* Now it implies p==r==free_list. Move the free_list
                 * to next chunk. */
                free_list = r->next;
            }
            else
            {
                /* Normal case. Remove it from free_list. */
                p->next = r->next;
            }
            break;
        }
        p=r;
        r=r->next;
    }

    /* Failed to find an appropriate chunk. Ask the system for more memory. */
    if (r == NULL)
    {
        r = sbrk_aligned(RCALL alloc_size);

        /* sbrk returns -1 if it fails to allocate. */
        if (r == (void *)-1)
        {
            RERRNO = ENOMEM;
            MALLOC_UNLOCK;
            return NULL;
        }
        r->size = alloc_size;
    }
    MALLOC_UNLOCK;

    ptr = (char *)r + CHUNK_OFFSET;

    align_ptr = (char *)ALIGN_TO((unsigned long)ptr, MALLOC_ALIGN);
    offset = align_ptr - ptr;

    if (offset)
    {
        /* Initialize sizeof (malloc_chunk.size) bytes at
           align_ptr - CHUNK_OFFSET with negative offset to the
           size field (at the start of the chunk).

           The negative offset to size from align_ptr - CHUNK_OFFSET is
           the size of any remaining padding minus CHUNK_OFFSET.  This is
           equivalent to the total size of the padding, because the size of
           any remaining padding is the total size of the padding minus
           CHUNK_OFFSET.

           Note that the size of the padding must be at least CHUNK_OFFSET.

           The rest of the padding is not initialized. */
        *(long *)((char *)r + offset) = -offset;
    }

    /* FIX: was `size`, which is not declared anywhere in this function. */
    assert(align_ptr + s <= (char *)r + alloc_size);
    return align_ptr;
}
/*
 * fill_line_flow - copy the rectangle [left,right) x [top,bottom) of pixel
 * data from `addr` (tightly packed RGB565, row-major) into the framebuffer,
 * remapping coordinates according to MTK_LCM_PHYSICAL_ROTATION.
 *
 * WIDTH is the framebuffer pitch in pixels (display width aligned to 32).
 *
 * NOTE(review): several index computations in the rotated branches look
 * suspicious and should be verified on hardware:
 *  - 270 branch: `l = HEIGHT - left` as the starting row — presumably should
 *    relate to the panel geometry after rotation; confirm.
 *  - 90 branch: `x = WIDTH - top + 1` start column (off-by-one vs. the usual
 *    WIDTH - 1 - top?).
 *  - 180 branch: `s` advances by `2 * ((height-1) * width)` UINT16 elements —
 *    looks like a byte/element confusion (the analogous show_logo code does
 *    byte arithmetic on a void*); and `*(s+width-x)` reads one element past
 *    the row start when x == 0.
 */
static void fill_line_flow(UINT32 left, UINT32 top, UINT32 right, UINT32 bottom, char *addr)
{
	void * fb_addr = mt_get_fb_addr();
	const UINT32 WIDTH = ALIGN_TO(CFG_DISPLAY_WIDTH,32);	/* fb pitch in pixels */
	const UINT32 HEIGHT = CFG_DISPLAY_HEIGHT;
	UINT16 *pLine = (UINT16 *)fb_addr + top * WIDTH + left;	/* dest for the unrotated case */
	UINT16 *pLine2 = (UINT16*)addr;		/* packed source pixels */
	INT32 x, y;
	INT32 i = 0;

	if(0 == strncmp(MTK_LCM_PHYSICAL_ROTATION, "270", 3))
	{
		unsigned int l;
		UINT16 *d = fb_addr;

		for (x=top; x<bottom; x++) {
			for (y=left, l= HEIGHT - left; y<right; y++, l--) {
				/* << 1: byte offset, 2 bytes per RGB565 pixel. */
				d = fb_addr + ((WIDTH * l + x) << 1);
				*d = pLine2[i++];
			}
			i = 0;	/* NOTE(review): restarts source row each pass — confirm intended */
		}
	}
	else if(0 == strncmp(MTK_LCM_PHYSICAL_ROTATION, "90", 2))
	{
		unsigned int l;
		UINT16 *d = fb_addr;

		for (x=WIDTH - top + 1; x > WIDTH - bottom; x--) {
			for (y=left, l=left; y<right; y++, l++) {
				d = fb_addr + ((WIDTH * l + x) << 1);
				*d = pLine2[i++];
			}
			i = 0;
		}
	}
	else if(0 == strncmp(MTK_LCM_PHYSICAL_ROTATION, "180", 3))
	{
		unsigned int height = (bottom - top);
		unsigned int width = (right - left);
		/* Source walks the rows bottom-up, mirrored per row. */
		UINT16 *s = (UINT16*)addr + (2 * ((height - 1) * width));
		UINT16 *d = (UINT16 *)fb_addr + (HEIGHT - bottom) * WIDTH + (WIDTH - right);
		//UINT16 *pLine2 = (UINT16*)addr;

		for (y = 0; y < height; ++ y) {
			for (x = 0; x < width; ++ x) {
				*(d+x) = *(s+width-x);
			}
			d += WIDTH;
			s -= width;
		}
	}
	else
	{
		/* No rotation: straight row-by-row copy into the pitched fb. */
		for (y = top; y < bottom; ++ y) {
			UINT16 *pPixel = pLine;
			for (x = left; x < right; ++ x) {
				*pPixel++ = pLine2[i++];
			}
			pLine += WIDTH;
			i = 0;	/* NOTE(review): source restarts each row — confirm `addr` holds one row */
		}
	}
}
// ---------------------------------------------------------------------------
//  DBI Display Driver Public Functions
// ---------------------------------------------------------------------------

/*
 * dsi_config_ddp - (re)configure the DDP path feeding the DSI engine.
 *
 * @fbPA: framebuffer physical address programmed into the OVL/LCD layer.
 *
 * Selects RDMA or OVL as source depending on decouple mode, programs the
 * framebuffer layer, routes the output to DSI command or video mode, and
 * switches the OVL M4U port to virtual addressing. Video mode changes are
 * bracketed by the path mutex since the path is live.
 *
 * Always returns DISP_STATUS_OK.
 */
static DISP_STATUS dsi_config_ddp(UINT32 fbPA)
{
	struct disp_path_config_struct config = {0};

	if (DISP_IsDecoupleMode()) {
		config.srcModule = DISP_MODULE_RDMA;
	} else {
		config.srcModule = DISP_MODULE_OVL;
	}

	config.bgROI.x = 0;
	config.bgROI.y = 0;
	config.bgROI.width = lcm_params->width;
	config.bgROI.height = lcm_params->height;
	config.bgColor = 0x0;	/* background color */
	config.pitch = lcm_params->width*2;

	config.srcROI.x = 0;
	config.srcROI.y = 0;
	config.srcROI.height= lcm_params->height;
	config.srcROI.width= lcm_params->width;
	config.ovl_config.source = OVL_LAYER_SOURCE_MEM;

	/* Video mode: disable the FB layer before reprogramming it below. */
	if(lcm_params->dsi.mode != CMD_MODE)
	{
		config.ovl_config.layer = DDP_OVL_LAYER_MUN-1;
		config.ovl_config.layer_en = 0;
		//disp_path_get_mutex();
		disp_path_config_layer(&config.ovl_config);
		//disp_path_release_mutex();
		//disp_path_wait_reg_update();
	}

#if 1
	// Disable LK UI layer (Layer2)
	{
		config.ovl_config.layer = DDP_OVL_LAYER_MUN-1-1;
		config.ovl_config.layer_en = 0;	/* disable LK UI layer anyway */
		//disp_path_get_mutex();
		disp_path_config_layer(&config.ovl_config);
		//disp_path_release_mutex();
		//disp_path_wait_reg_update();
	}
#endif

	/* Reconfigure the framebuffer layer and enable it. */
	config.ovl_config.layer = DDP_OVL_LAYER_MUN-1;
	config.ovl_config.layer_en = 1;
	config.ovl_config.fmt = eRGB565;
	config.ovl_config.addr = fbPA;
	config.ovl_config.source = OVL_LAYER_SOURCE_MEM;
	config.ovl_config.src_x = 0;
	config.ovl_config.src_y = 0;
	config.ovl_config.dst_x = 0;	/* ROI */
	config.ovl_config.dst_y = 0;
	config.ovl_config.dst_w = lcm_params->width;
	config.ovl_config.dst_h = lcm_params->height;
	/* NOTE(review): aligned width * 2 — presumably a pitch in BYTES, not
	 * "pixel number"; confirm against the OVL register spec. */
	config.ovl_config.src_pitch = ALIGN_TO(lcm_params->width, MTK_FB_ALIGNMENT)*2;
	config.ovl_config.keyEn = 0;
	config.ovl_config.key = 0xFF;	/* color key */
	config.ovl_config.aen = 0;	/* alpha enable */
	config.ovl_config.alpha = 0;

	/*LCD_LayerSetAddress(DDP_OVL_LAYER_MUN-1, fbPA);
	LCD_LayerSetFormat(DDP_OVL_LAYER_MUN-1, LCD_LAYER_FORMAT_RGB565);
	LCD_LayerSetOffset(DDP_OVL_LAYER_MUN-1, 0, 0);
	LCD_LayerSetSize(DDP_OVL_LAYER_MUN-1,lcm_params->width,lcm_params->height);
	LCD_LayerSetPitch(DDP_OVL_LAYER_MUN-1, ALIGN_TO(lcm_params->width, MTK_FB_ALIGNMENT) * 2);
	LCD_LayerEnable(DDP_OVL_LAYER_MUN-1, TRUE); */
	LCD_LayerSetAddress(FB_LAYER, fbPA);
	LCD_LayerSetFormat(FB_LAYER, eRGB565);
	LCD_LayerSetOffset(FB_LAYER, 0, 0);
	LCD_LayerSetSize(FB_LAYER,lcm_params->width,lcm_params->height);
	LCD_LayerSetPitch(FB_LAYER, ALIGN_TO(lcm_params->width, MTK_FB_ALIGNMENT) * 2);
	LCD_LayerEnable(FB_LAYER, TRUE);

	if(lcm_params->dsi.mode == CMD_MODE)
		config.dstModule = DISP_MODULE_DSI_CMD;	/* DISP_MODULE_WDMA1 */
	else
		config.dstModule = DISP_MODULE_DSI_VDO;	/* DISP_MODULE_WDMA1 */
	config.outFormat = RDMA_OUTPUT_FORMAT_ARGB;

	disp_path_config(&config);

	/* Video mode reprograms a live path: hold the mutex around the
	 * M4U port switch. */
	if(lcm_params->dsi.mode != CMD_MODE)
	{
		//DSI_Wait_VDO_Idle();
		disp_path_get_mutex();
	}

	// Config FB_Layer port to be virtual (through the M4U).
	{
		M4U_PORT_STRUCT portStruct;

		portStruct.ePortID = DISP_OVL_0;	/* hardware port ID, defined in M4U_PORT_ID_ENUM */
		portStruct.Virtuality = 1;
		portStruct.Security = 0;
		portStruct.domain = 3;			/* domain : 0 1 2 3 */
		portStruct.Distance = 1;
		portStruct.Direction = 0;
		m4u_config_port(&portStruct);
	}

	if(lcm_params->dsi.mode != CMD_MODE)
	{
		disp_path_release_mutex();
		//if(1 == lcm_params->dsi.ufoe_enable)
		//	UFOE_Start();
		//DSI_Start();
	}

	printk("%s, config done\n", __func__);

	return DISP_STATUS_OK;
}
/*
 * main - flinux entry point: initialize all subsystems, parse the Windows
 * command line into argv[]/envp[] inside a single mmap'd startup block,
 * then exec the requested Linux binary.
 *
 * Exits via process_exit(1, 0) if the command line is too long, no
 * executable was named, or do_execve() returns.
 */
void main()
{
	log_init();
	fork_init();
	/* fork_init() will directly jump to restored thread context if we are a fork child */
	mm_init();
	install_syscall_handler();
	heap_init();
	signal_init();
	process_init();
	tls_init();
	vfs_init();
	dbt_init();

	/* Parse command line */
	const char *cmdline = GetCommandLineA();
	int len = strlen(cmdline);
	if (len > BLOCK_SIZE) /* TODO: Test if there is sufficient space for argv[] array */
	{
		kprintf("Command line too long.\n");
		process_exit(1, 0);
	}
	/* One block holds, in order: a flag word, the raw command line (later
	 * split in place), the environment strings, then the argv[] and envp[]
	 * pointer arrays. NOTE(review): nothing checks that the arrays fit
	 * inside BLOCK_SIZE — see the TODO above. */
	startup = mm_mmap(NULL, BLOCK_SIZE, PROT_READ | PROT_WRITE, MAP_ANONYMOUS, INTERNAL_MAP_TOPDOWN | INTERNAL_MAP_NORESET, NULL, 0);
	*(uintptr_t*) startup = 1;
	char *current_startup_base = startup + sizeof(uintptr_t);
	memcpy(current_startup_base, cmdline, len + 1);
	char *envbuf = (char *)ALIGN_TO(current_startup_base + len + 1, sizeof(void*));
	/* ENV() presumably copies the string at envbuf and advances envbuf —
	 * TODO confirm against the macro definition. */
	char *env0 = envbuf;
	ENV("TERM=xterm");
	char *env1 = envbuf;
	ENV("HOME=/root");
	char *env2 = envbuf;
	ENV("DISPLAY=127.0.0.1:0");
	char *env3 = envbuf;
	ENV("PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/bin:/sbin");
	int argc = 0;
	char **argv = (char **)ALIGN_TO(envbuf, sizeof(void*));

	/* Parse command line */
	/* Split in place on whitespace; double quotes group a single argument.
	 * The loop includes the terminating NUL (i <= base + len) so the last
	 * token is flushed. */
	int in_quote = 0;
	char *j = current_startup_base;
	for (char *i = current_startup_base; i <= current_startup_base + len; i++)
		if (!in_quote && (*i == ' ' || *i == '\t' || *i == '\r' || *i == '\n' || *i == 0))
		{
			*i = 0;
			if (i > j)
				argv[argc++] = j;
			j = i + 1;
		}
		else if (*i == '"')
		{
			*i = 0;
			if (in_quote)
				argv[argc++] = j;
			in_quote = !in_quote;
			j = i + 1;
		}
	argv[argc] = NULL;
	/* envp[] immediately follows argv[] (including its NULL terminator). */
	char **envp = argv + argc + 1;
	int env_size = 4;
	envp[0] = env0;
	envp[1] = env1;
	envp[2] = env2;
	envp[3] = env3;
	envp[4] = NULL;
	char *buffer_base = (char*)(envp + env_size + 1);

	/* First non-option argument is the executable; "-" options are
	 * currently recognized but ignored. */
	const char *filename = NULL;
	for (int i = 1; i < argc; i++)
	{
		if (argv[i][0] == '-')
		{
		}
		else if (!filename)
			filename = argv[i];
	}
	/* argv[0] is the flinux binary itself, so the guest gets argv + 1. */
	if (filename)
		do_execve(filename, argc - 1, argv + 1, env_size, envp, buffer_base, NULL);
	kprintf("Usage: flinux <executable> [arguments]\n");
	process_exit(1, 0);
}