/*
 * ovl2mem_trigger - flush the pending OVL->memory (writeback) frame via CMDQ.
 *
 * @blocking: unused in this variant.
 * @callback: unused in this variant (completion is routed to ovl2mem_callback).
 * @userdata: unused in this variant.
 *
 * Records start/trigger/stop of the dpmgr path into the shared config command
 * buffer, gated on the WDMA1 end-of-frame event, then flushes it asynchronously
 * with the current trigger ticket as the callback argument.
 *
 * NOTE(review): ret is initialized to -1 and never updated, so the function
 * returns -1 even on the successful path — confirm callers treat the return
 * value as informational only.
 */
int ovl2mem_trigger(int blocking, void *callback, unsigned int userdata)
{
	int ret = -1;

	DISPFUNC();

	/* Nothing was configured since the last trigger; skip silently. */
	if(pgc->need_trigger_path == 0) {
		DISPMSG("ovl2mem_trigger do not trigger\n");
		return ret;
	}

	_ovl2mem_path_lock(__func__);

	/* Record the frame sequence into the config command buffer:
	 * start path -> trigger path -> (in-CMDQ) wait for WDMA1 EOF -> stop path.
	 * The cmdqRecWait guarantees the stop executes only after writeback
	 * completes, all inside the command queue, not on the CPU. */
	dpmgr_path_start(pgc->dpmgr_handle, ovl2mem_cmdq_enabled());
	dpmgr_path_trigger(pgc->dpmgr_handle, pgc->cmdq_handle_config, ovl2mem_cmdq_enabled());
	cmdqRecWait(pgc->cmdq_handle_config, CMDQ_EVENT_DISP_WDMA1_EOF);
	dpmgr_path_stop(pgc->dpmgr_handle, ovl2mem_cmdq_enabled());

	///cmdqRecDumpCommand(pgc->cmdq_handle_config);

	/* Non-blocking flush; ovl2mem_callback receives the ticket current at
	 * submission time (read before the increment below). */
	cmdqRecFlushAsyncCallback(pgc->cmdq_handle_config, ovl2mem_callback, atomic_read(&g_trigger_ticket));
	cmdqRecReset(pgc->cmdq_handle_config);
	pgc->need_trigger_path = 0;
	atomic_add(1, &g_trigger_ticket );

	_ovl2mem_path_unlock(__func__);

	DISPMSG("ovl2mem_trigger done %d\n", get_ovl2mem_ticket());

	return ret;
}
/*
 * setup_rdma_sec - program RDMA secure/non-secure state for SVP buffers.
 *
 * @module:  RDMA module; mapped via rdma_index() to slot 0/1 and the matching
 *           CMDQ engine (CMDQ_ENG_DISP_RDMA0/1).
 * @pConfig: path config; only rdma_config.security and rdma_config.address
 *           (through rdma_config_mode()) are consulted here.
 * @handle:  CMDQ record handle the secure settings are appended to; a NULL
 *           handle bypasses the whole setup.
 *
 * Returns 0 on success or bypass, -1 if the temporary "disable secure path"
 * handle cannot be created.
 *
 * Tracks per-engine secure state in rdma_is_sec[] so the sec->nonsec switch
 * (which needs its own flushed CMDQ task) only runs on an actual transition.
 */
static int setup_rdma_sec(DISP_MODULE_ENUM module, disp_ddp_path_config* pConfig, void *handle)
{
	static int rdma_is_sec[2];
	CMDQ_ENG_ENUM cmdq_engine;
	int rdma_idx = rdma_index(module);
	DISP_BUFFER_TYPE security = pConfig->rdma_config.security;
	enum RDMA_MODE mode = rdma_config_mode(pConfig->rdma_config.address);

	cmdq_engine = rdma_idx==0 ? CMDQ_ENG_DISP_RDMA0 : CMDQ_ENG_DISP_RDMA1;

	if(!handle) {
		DDPMSG("[SVP] bypass rdma sec setting sec=%d,handle=NULL\n", security);
		return 0;
	}

	/* sec setting makes sense only in memory mode! */
	if(mode == RDMA_MODE_MEMORY) {
		if(security == DISP_SECURE_BUFFER) {
			cmdqRecSetSecure(handle, 1);
			/* set engine as sec */
			cmdqRecSecureEnablePortSecurity(handle, (1LL << cmdq_engine));
			//cmdqRecSecureEnableDAPC(handle, (1LL << cmdq_engine));
			if(rdma_is_sec[rdma_idx] == 0)
				DDPMSG("[SVP] switch rdma%d to sec\n", rdma_idx);
			rdma_is_sec[rdma_idx] = 1;
		} else {
			if(rdma_is_sec[rdma_idx]) {
				/* rdma is in sec state, we need to switch it to nonsec */
				cmdqRecHandle nonsec_switch_handle;
				int ret;

				ret = cmdqRecCreate(CMDQ_SCENARIO_DISP_PRIMARY_DISABLE_SECURE_PATH,
						&(nonsec_switch_handle));
				if(ret) {
					/* BUGFIX: on failure the handle is uninitialized; the
					 * original code logged and then used it anyway. Bail
					 * out and keep rdma_is_sec set so the switch is
					 * retried on the next call. */
					DDPAEE("[SVP]fail to create disable handle %s ret=%d\n",
						__FUNCTION__, ret);
					return -1;
				}

				cmdqRecReset(nonsec_switch_handle);
				_cmdq_insert_wait_frame_done_token_mira(nonsec_switch_handle);
				cmdqRecSetSecure(nonsec_switch_handle, 1);

				/* ugly workaround by kzhang!! will be removed when cmdq deletes
				 * the disable scenario. To avoid translation fault like ovl
				 * (see notes in ovl.c) */
				do_rdma_config_l(module, pConfig, nonsec_switch_handle);

				/* in fact, dapc/port_sec will be disabled by cmdq */
				cmdqRecSecureEnablePortSecurity(nonsec_switch_handle, (1LL << cmdq_engine));
				//cmdqRecSecureEnableDAPC(nonsec_switch_handle, (1LL << cmdq_engine));
				cmdqRecFlush(nonsec_switch_handle);
				cmdqRecDestroy(nonsec_switch_handle);
				DDPMSG("[SVP] switch rdma%d to nonsec done\n", rdma_idx);
			}
			rdma_is_sec[rdma_idx] = 0;
		}
	}

	return 0;
}
/*
 * ovl2mem_trigger - flush the pending OVL->memory (writeback) frame via CMDQ.
 *
 * @blocking: unused in this variant.
 * @callback: unused in this variant (completion is routed to ovl2mem_callback).
 * @userdata: unused in this variant.
 *
 * Records start/trigger/stop of the dpmgr path into the shared config command
 * buffer, gated on the WDMA1 end-of-frame event, then flushes it asynchronously
 * with the current trigger ticket as the callback argument. If there is nothing
 * to trigger but an input-only config is outstanding, its fences are released
 * so clients do not block forever.
 *
 * NOTE(review): ret is initialized to -1 and never updated, so the function
 * returns -1 even on the successful path — confirm callers treat the return
 * value as informational only.
 */
int ovl2mem_trigger(int blocking, void *callback, unsigned int userdata)
{
	int ret = -1;
	int fence_idx = 0;
	int layid = 0;

	DISPFUNC();

	if(pgc->need_trigger_path == 0) {
		DISPMSG("ovl2mem_trigger do not trigger\n");
		/* Trigger ticket one ahead of release ticket means an input was
		 * configured without a matching output config: release every fence
		 * bound to the current ticket across all layers (plus one extra
		 * slot beyond HW_OVERLAY_COUNT, presumably the output layer —
		 * TODO confirm). */
		if ((atomic_read(&g_trigger_ticket) - atomic_read(&g_release_ticket)) == 1) {
			DISPMSG("ovl2mem_trigger(%x), configue input, but does not config output!!\n", pgc->session);
			for(layid = 0; layid < (HW_OVERLAY_COUNT + 1); layid++) {
				fence_idx = mtkfb_query_idx_by_ticket(pgc->session, layid, atomic_read(&g_trigger_ticket));
				if(fence_idx >=0) {
					mtkfb_release_fence(pgc->session, layid, fence_idx);
				}
			}
		}
		return ret;
	}

	_ovl2mem_path_lock(__func__);

	/* Record the frame sequence into the config command buffer:
	 * start path -> trigger path -> (in-CMDQ) wait for WDMA1 EOF -> stop path.
	 * The cmdqRecWait guarantees the stop executes only after writeback
	 * completes, all inside the command queue, not on the CPU. */
	dpmgr_path_start(pgc->dpmgr_handle, ovl2mem_cmdq_enabled());
	dpmgr_path_trigger(pgc->dpmgr_handle, pgc->cmdq_handle_config, ovl2mem_cmdq_enabled());
	cmdqRecWait(pgc->cmdq_handle_config, CMDQ_EVENT_DISP_WDMA1_EOF);
	dpmgr_path_stop(pgc->dpmgr_handle, ovl2mem_cmdq_enabled());

	///cmdqRecDumpCommand(pgc->cmdq_handle_config);

	/* Non-blocking flush; ovl2mem_callback receives the ticket current at
	 * submission time (read before the increment below). */
	cmdqRecFlushAsyncCallback(pgc->cmdq_handle_config, ovl2mem_callback, atomic_read(&g_trigger_ticket));
	cmdqRecReset(pgc->cmdq_handle_config);
	pgc->need_trigger_path = 0;
	atomic_add(1, &g_trigger_ticket );

	_ovl2mem_path_unlock(__func__);

	/* Begin the frame-sequence trace record for this session. */
	dprec_logger_frame_seq_begin(pgc->session, mtkfb_query_frm_seq_by_addr(pgc->session, 0, 0));

	DISPMSG("ovl2mem_trigger ovl2mem_seq %d-seq %d\n", get_ovl2mem_ticket(), mtkfb_query_frm_seq_by_addr(pgc->session, 0, 0));

	return ret;
}
/*
 * _cmdq_reset_config_handle - rewind the shared config command buffer so the
 * next frame's commands can be recorded from the start.
 */
static void _cmdq_reset_config_handle(void)
{
	cmdqRecReset(pgc->cmdq_handle_config);
	///dprec_event_op(DPREC_EVENT_CMDQ_RESET);
}
/*
 * _cmdq_build_trigger_loop - record the self-running CMDQ trigger loop for the
 * external display path.
 *
 * Creates a CMDQ_SCENARIO_TRIGGER_LOOP handle and records into it either:
 *  - video mode: wait on the path mutex's stream-EOF event, then post-EOF
 *    module work; or
 *  - command mode: the full software trigger sequence (wait config-dirty,
 *    pre-SOF work, clear STREAM_EOF token, trigger mutex, wait frame done,
 *    post-EOF work, set STREAM_EOF token).
 * The recorded loop is later started by CMDQ and runs forever; this function
 * only builds it.
 *
 * NOTE(review): the return value of cmdqRecCreate() is ignored here — on
 * failure pgc->cmdq_handle_trigger contents are undefined; confirm the caller
 * guarantees this cannot fail in practice.
 */
static void _cmdq_build_trigger_loop(void)
{
	int ret = 0;

	cmdqRecCreate(CMDQ_SCENARIO_TRIGGER_LOOP, &(pgc->cmdq_handle_trigger));
	EXT_DISP_LOG("ext_disp path trigger thread cmd handle=%p\n", pgc->cmdq_handle_trigger);
	cmdqRecReset(pgc->cmdq_handle_trigger);

	if(ext_disp_is_video_mode()) {
		// wait and clear stream_done, HW will assert mutex enable automatically in frame done reset.
		// todo: should let dpmanager to decide wait which mutex's eof.
		ret = cmdqRecWait(pgc->cmdq_handle_trigger, dpmgr_path_get_mutex(pgc->dpmgr_handle) + CMDQ_EVENT_MUTEX0_STREAM_EOF);
		///dpmgr_path_get_mutex(pgc->dpmgr_handle)

		// for some module(like COLOR) to read hw register to GPR after frame done
		dpmgr_path_build_cmdq(pgc->dpmgr_handle, pgc->cmdq_handle_trigger,CMDQ_AFTER_STREAM_EOF);
	} else {
		// DSI command mode doesn't have mutex_stream_eof, need use CMDQ token instead
		ret = cmdqRecWait(pgc->cmdq_handle_trigger, CMDQ_SYNC_TOKEN_CONFIG_DIRTY);
		//ret = cmdqRecWait(pgc->cmdq_handle_trigger, CMDQ_EVENT_MDP_DSI0_TE_SOF);

		// for operations before frame transfer, such as waiting for DSI TE
		dpmgr_path_build_cmdq(pgc->dpmgr_handle, pgc->cmdq_handle_trigger,CMDQ_BEFORE_STREAM_SOF);

		// clear frame done token, now the config thread will not be allowed to config registers.
		// remember that config thread's priority is higher than trigger thread, so all the config
		// queued before will be applied, then STREAM_EOF token is cleared.
		// this is what CMDQ did as "Merge"
		ret = cmdqRecClearEventToken(pgc->cmdq_handle_trigger, CMDQ_SYNC_TOKEN_STREAM_EOF);

		// enable mutex, only cmd mode need this
		// this is what CMDQ did as "Trigger"
		dpmgr_path_trigger(pgc->dpmgr_handle, pgc->cmdq_handle_trigger, CMDQ_ENABLE);
		//ret = cmdqRecWrite(pgc->cmdq_handle_trigger, (unsigned int)(DISP_REG_CONFIG_MUTEX_EN(0))&0x1fffffff, 1, ~0);

		// waiting for frame done, because we can't use mutex stream eof here, so need to let
		// dpmanager help to decide which event to wait.
		// most time we wait rdmax frame done event.
		ret = cmdqRecWait(pgc->cmdq_handle_trigger, CMDQ_EVENT_DISP_RDMA1_EOF);
		dpmgr_path_build_cmdq(pgc->dpmgr_handle, pgc->cmdq_handle_trigger,CMDQ_WAIT_STREAM_EOF_EVENT);

		// dsi is not idle rightly after rdma frame done, so we need to polling about 1us for dsi
		// returns to idle. do not polling dsi idle directly which will decrease CMDQ performance
		dpmgr_path_build_cmdq(pgc->dpmgr_handle, pgc->cmdq_handle_trigger,CMDQ_CHECK_IDLE_AFTER_STREAM_EOF);

		// for some module(like COLOR) to read hw register to GPR after frame done
		dpmgr_path_build_cmdq(pgc->dpmgr_handle, pgc->cmdq_handle_trigger,CMDQ_AFTER_STREAM_EOF);

		// polling DSI idle
		//ret = cmdqRecPoll(pgc->cmdq_handle_trigger, 0x1401b00c, 0, 0x80000000);
		// polling wdma frame done
		//ret = cmdqRecPoll(pgc->cmdq_handle_trigger, 0x140060A0, 1, 0x1);

		// now frame done, config thread is allowed to config register now
		ret = cmdqRecSetEventToken(pgc->cmdq_handle_trigger, CMDQ_SYNC_TOKEN_STREAM_EOF);

		// RUN forever!!!!
		BUG_ON(ret < 0);
	}

	// dump trigger loop instructions to check whether dpmgr_path_build_cmdq works correctly
	cmdqRecDumpCommand(pgc->cmdq_handle_trigger);

	EXT_DISP_LOG("ext display BUILD cmdq trigger loop finished\n");

	return;
}
/*
 * wdma_config_l - apply a pending WDMA (writeback DMA) configuration.
 *
 * @module:  WDMA module; mapped via wdma_index() to slot 0/1 and the matching
 *           CMDQ engine (CMDQ_ENG_DISP_WDMA0/1).
 * @pConfig: path config; acted on only when wdma_dirty is set.
 * @handle:  CMDQ record handle; when non-NULL (and in decouple mode) a warm
 *           soft-reset of the engine is recorded before configuration.
 *
 * Returns 0 on success or when nothing is dirty, -1 if the temporary
 * "disable secure path" handle cannot be created.
 *
 * Tracks per-engine secure state in wdma_is_sec[] (file-scope, defined
 * elsewhere) so the sec->nonsec switch only runs on an actual transition.
 */
static int wdma_config_l(DISP_MODULE_ENUM module, disp_ddp_path_config* pConfig, void *handle)
{
	WDMA_CONFIG_STRUCT* config = &pConfig->wdma_config;
	int wdma_idx = wdma_index(module);
	CMDQ_ENG_ENUM cmdq_engine;

	if (!pConfig->wdma_dirty) {
		return 0;
	}

	// warm reset wdma every time we use it
	if(handle) {
		if(/*(primary_display_is_video_mode()==0 && primary_display_is_decouple_mode()==0) ||*/
			primary_display_is_decouple_mode()==1) {
			unsigned int idx_offst = wdma_idx*DISP_WDMA_INDEX_OFFSET;

			DISP_REG_SET(handle, idx_offst+DISP_REG_WDMA_RST, 0x01); // trigger soft reset
			/* wait in-CMDQ until the flow-control debug register reports the
			 * engine is back in its reset-complete state */
			cmdqRecPoll(handle, disp_addr_convert(idx_offst+DISP_REG_WDMA_FLOW_CTRL_DBG), 1, 0x1);
			DISP_REG_SET(handle, idx_offst+DISP_REG_WDMA_RST , 0x0); // release soft reset
		}
	}

	cmdq_engine = wdma_idx==0 ? CMDQ_ENG_DISP_WDMA0 : CMDQ_ENG_DISP_WDMA1;

	if(config->security == DISP_SECURE_BUFFER) {
		/* NOTE(review): unlike setup_rdma_sec() there is no handle==NULL
		 * bypass here before the cmdqRec* calls — confirm callers never pass
		 * NULL together with a secure buffer. */
		cmdqRecSetSecure(handle, 1);
		/* set engine as sec */
		cmdqRecSecureEnablePortSecurity(handle, (1LL << cmdq_engine));
		cmdqRecSecureEnableDAPC(handle, (1LL << cmdq_engine));
		if(wdma_is_sec[wdma_idx] == 0)
			DDPMSG("[SVP] switch wdma%d to sec\n", wdma_idx);
		wdma_is_sec[wdma_idx] = 1;
	} else {
		if(wdma_is_sec[wdma_idx]) {
			/* wdma is in sec state, we need to switch it to nonsec */
			cmdqRecHandle nonsec_switch_handle;
			int ret;

			ret = cmdqRecCreate(CMDQ_SCENARIO_DISP_PRIMARY_DISABLE_SECURE_PATH,
					&(nonsec_switch_handle));
			if(ret) {
				/* BUGFIX: on failure the handle is uninitialized; the
				 * original code logged and then used it anyway. Bail out
				 * and keep wdma_is_sec set so the switch is retried on
				 * the next call. */
				DDPAEE("[SVP]fail to create disable handle %s ret=%d\n",
					__FUNCTION__, ret);
				return -1;
			}

			cmdqRecReset(nonsec_switch_handle);
			_cmdq_insert_wait_frame_done_token_mira(nonsec_switch_handle);
			cmdqRecSetSecure(nonsec_switch_handle, 1);

			/* in fact, dapc/port_sec will be disabled by cmdq */
			cmdqRecSecureEnablePortSecurity(nonsec_switch_handle, (1LL << cmdq_engine));
			cmdqRecSecureEnableDAPC(nonsec_switch_handle, (1LL << cmdq_engine));
			cmdqRecFlushAsync(nonsec_switch_handle);
			cmdqRecDestroy(nonsec_switch_handle);
			DDPMSG("[SVP] switch wdma%d to nonsec\n", wdma_idx);
		}
		wdma_is_sec[wdma_idx] = 0;
	}

	/* only program the hardware when the parameters pass sanity checks */
	if (wdma_check_input_param(config) == 0)
		wdma_config(module,
			config->srcWidth, config->srcHeight,
			config->clipX, config->clipY,
			config->clipWidth, config->clipHeight,
			config->outputFormat,
			config->dstAddress, config->dstPitch,
			config->useSpecifiedAlpha, config->alpha,
			config->security, handle);

	return 0;
}