Esempio n. 1
0
/*
 * Configure the OVL input layers for the ovl2mem (overlay-to-memory) path.
 *
 * @input: array of HW_OVERLAY_COUNT per-layer configs (caller-owned).
 * Returns the status of the final dpmgr_path_config(), 0 if the path is
 * already slept, or -1 if nothing was configured.
 */
int ovl2mem_input_config(ovl2mem_in_config *input)
{
	int ret = -1;
	int i = 0;
	disp_ddp_path_config *data_config;

	DISPFUNC();
	_ovl2mem_path_lock(__func__);

	/* all dirty should be cleared in dpmgr_path_get_last_config() */
	data_config = dpmgr_path_get_last_config(pgc->dpmgr_handle);
	data_config->dst_dirty = 0;
	data_config->ovl_dirty = 0;
	data_config->rdma_dirty = 0;

	if (pgc->state == 0) {
		DISPMSG("ovl2mem is already slept\n");
		_ovl2mem_path_unlock(__func__);
		return 0;
	}

	/* hope we can use only 1 input struct for input config, just set layer number */
	for (i = 0; i < HW_OVERLAY_COUNT; i++) {
		/*
		 * FIX: log the current iteration's layer (input[i]), not
		 * input[0]; the original logged element 0's fields for every
		 * layer in the loop.
		 */
		dprec_logger_start(DPREC_LOGGER_PRIMARY_CONFIG,
				   input[i].layer | (input[i].layer_en << 16),
				   input[i].addr);

		/* only translate layers whose config actually changed */
		if (input[i].dirty)
			ret = _convert_disp_input_to_ovl(&(data_config->ovl_config[input[i].layer]),
							 (primary_disp_input_config *)&input[i]);

		/*
		 * OVL is marked dirty for the whole frame even when only some
		 * layers changed -- the hardware latches the full layer set.
		 */
		data_config->ovl_dirty = 1;
		dprec_logger_done(DPREC_LOGGER_PRIMARY_CONFIG,
				  input[i].src_x, input[i].src_y);
	}

	/* wait for the previous frame to finish before reprogramming */
	if (dpmgr_path_is_busy(pgc->dpmgr_handle))
		dpmgr_wait_event_timeout(pgc->dpmgr_handle, DISP_PATH_EVENT_FRAME_COMPLETE, HZ / 5);

	ret = dpmgr_path_config(pgc->dpmgr_handle, data_config, pgc->cmdq_handle_config);

	_ovl2mem_path_unlock(__func__);

	DISPMSG("ovl2mem_input_config done\n");
	return ret;
}
Esempio n. 2
0
/*
 * Program the WDMA output (destination buffer) of the ovl2mem path.
 *
 * @out: output buffer description (address, size, clip, format, pitch).
 * Returns the dpmgr_path_config() status, or 0 if the path is slept.
 */
int ovl2mem_output_config(ovl2mem_out_config *out)
{
	int ret = -1;
	disp_ddp_path_config *data_config;

	/* DISPFUNC(); */

	_ovl2mem_path_lock(__func__);

	/*
	 * FIX: check the sleep state BEFORE touching the path config.  The
	 * original fetched and fully programmed the WDMA settings first and
	 * only then bailed out, reading last-config from a path that may
	 * already be torn down.
	 */
	if (pgc->state == 0) {
		DISPMSG("ovl2mem is already sleeped\n");
		_ovl2mem_path_unlock(__func__);
		return 0;
	}

	/* all dirty should be cleared in dpmgr_path_get_last_config() */
	data_config = dpmgr_path_get_last_config(pgc->dpmgr_handle);
	data_config->dst_dirty = 1;
	data_config->dst_h = out->h;
	data_config->dst_w = out->w;
	data_config->ovl_dirty = 0;
	data_config->rdma_dirty = 0;
	data_config->wdma_dirty = 1;
	data_config->wdma_config.dstAddress = out->addr;
	data_config->wdma_config.srcHeight = out->h;
	data_config->wdma_config.srcWidth = out->w;
	data_config->wdma_config.clipX = out->x;
	data_config->wdma_config.clipY = out->y;
	data_config->wdma_config.clipHeight = out->h;
	data_config->wdma_config.clipWidth = out->w;
	data_config->wdma_config.outputFormat = out->fmt;
	data_config->wdma_config.dstPitch = out->pitch;
	/* force constant full alpha so the written-out buffer is opaque */
	data_config->wdma_config.useSpecifiedAlpha = 1;
	data_config->wdma_config.alpha = 0xFF;

	/* wait for the in-flight frame before reprogramming WDMA */
	if (dpmgr_path_is_busy(pgc->dpmgr_handle))
		dpmgr_wait_event_timeout(pgc->dpmgr_handle, DISP_PATH_EVENT_FRAME_DONE, HZ / 5);

	ret = dpmgr_path_config(pgc->dpmgr_handle, data_config, pgc->cmdq_handle_config);

	/* output is set up -- the next ovl2mem_trigger() may fire the path */
	pgc->need_trigger_path = 1;

	_ovl2mem_path_unlock(__func__);

	/* DISPMSG("ovl2mem_output_config done\n"); */

	return ret;
}
Esempio n. 3
0
/*
 * Tear down the ovl2mem path: wait for the last frame, stop and destroy
 * the dpmgr path, release the CMDQ handle, and reset all path state.
 * Returns 0 on successful teardown, -1 if the path was not initialized.
 */
int ovl2mem_deinit(void)
{
	int ret = -1;

	DISPFUNC();

	_ovl2mem_path_lock(__func__);

	/* nothing to do if the path was never initialized (or already torn down) */
	if (pgc->state == 0)
		goto Exit;

	/* make sure the last submitted frame has completed before teardown */
	ovl2mem_wait_done();

	dpmgr_path_stop(pgc->dpmgr_handle, CMDQ_DISABLE);
	dpmgr_path_reset(pgc->dpmgr_handle, CMDQ_DISABLE);
	dpmgr_path_deinit(pgc->dpmgr_handle, CMDQ_DISABLE);
	dpmgr_destroy_path(pgc->dpmgr_handle, NULL);

	cmdqRecDestroy(pgc->cmdq_handle_config);

	pgc->dpmgr_handle = NULL;
	pgc->cmdq_handle_config = NULL;
	pgc->state = 0;
	pgc->need_trigger_path = 0;
	atomic_set(&g_trigger_ticket, 1);
	atomic_set(&g_release_ticket, 1);
	ovl2mem_layer_num = 0;

	/*
	 * FIX: report success -- the original left ret at -1 even after a
	 * fully successful teardown.
	 */
	ret = 0;

Exit:
	_ovl2mem_path_unlock(__func__);

	DISPMSG("ovl2mem_deinit done\n");
	return ret;
}
/*
 * Fire the ovl2mem path: start it, queue a wait for WDMA1 end-of-frame in
 * the CMDQ stream, stop the path, and flush the commands asynchronously.
 *
 * @blocking/@callback/@userdata: currently unused here (kept for interface
 * compatibility; completion is reported via ovl2mem_callback).
 * Returns the CMDQ flush status, or -1 when there is nothing to trigger.
 */
int ovl2mem_trigger(int blocking, void *callback, unsigned int userdata)
{
	int ret = -1;

	DISPFUNC();

	/* nothing pending: output was never configured for this frame */
	if (pgc->need_trigger_path == 0) {
		DISPMSG("ovl2mem_trigger do not trigger\n");
		return ret;
	}
	_ovl2mem_path_lock(__func__);

	dpmgr_path_start(pgc->dpmgr_handle, ovl2mem_cmdq_enabled());
	dpmgr_path_trigger(pgc->dpmgr_handle, pgc->cmdq_handle_config, ovl2mem_cmdq_enabled());

	/* wait (inside the CMDQ command stream) for WDMA1 end-of-frame */
	cmdqRecWait(pgc->cmdq_handle_config, CMDQ_EVENT_DISP_WDMA1_EOF);

	dpmgr_path_stop(pgc->dpmgr_handle, ovl2mem_cmdq_enabled());

	/* cmdqRecDumpCommand(pgc->cmdq_handle_config); */

	/*
	 * FIX: propagate the flush status -- the original discarded it and
	 * always returned -1 even on a successful trigger.
	 */
	ret = cmdqRecFlushAsyncCallback(pgc->cmdq_handle_config, ovl2mem_callback,
					atomic_read(&g_trigger_ticket));

	cmdqRecReset(pgc->cmdq_handle_config);

	pgc->need_trigger_path = 0;
	atomic_add(1, &g_trigger_ticket);

	_ovl2mem_path_unlock(__func__);

	DISPMSG("ovl2mem_trigger done %d\n", get_ovl2mem_ticket());

	return ret;
}
Esempio n. 5
0
/*
 * Fire the ovl2mem path (fence-aware variant): start it, queue a wait for
 * WDMA1 end-of-frame in the CMDQ stream, stop the path, and flush the
 * commands asynchronously.  If input was configured but output was not,
 * release any fences already attached to this trigger ticket so clients
 * do not block forever.
 *
 * @blocking/@callback/@userdata: currently unused here (kept for interface
 * compatibility; completion is reported via ovl2mem_callback).
 * Returns the CMDQ flush status, or -1 when there is nothing to trigger.
 */
int ovl2mem_trigger(int blocking, void *callback, unsigned int userdata)
{
	int ret = -1;
	int fence_idx = 0;
	int layid = 0;

	DISPFUNC();

	if (pgc->need_trigger_path == 0) {
		DISPMSG("ovl2mem_trigger do not trigger\n");
		/*
		 * Input was configured (ticket advanced) but output never
		 * was: release the fences tied to the pending ticket so the
		 * client side does not wait on a frame that will never fire.
		 */
		if ((atomic_read(&g_trigger_ticket) - atomic_read(&g_release_ticket)) == 1) {
			DISPMSG("ovl2mem_trigger(%x), configue input, but does not config output!!\n",
				pgc->session);
			for (layid = 0; layid < (HW_OVERLAY_COUNT + 1); layid++) {
				fence_idx = mtkfb_query_idx_by_ticket(pgc->session, layid,
								      atomic_read(&g_trigger_ticket));
				if (fence_idx >= 0)
					mtkfb_release_fence(pgc->session, layid, fence_idx);
			}
		}
		return ret;
	}
	_ovl2mem_path_lock(__func__);

	dpmgr_path_start(pgc->dpmgr_handle, ovl2mem_cmdq_enabled());
	dpmgr_path_trigger(pgc->dpmgr_handle, pgc->cmdq_handle_config, ovl2mem_cmdq_enabled());

	/* wait (inside the CMDQ command stream) for WDMA1 end-of-frame */
	cmdqRecWait(pgc->cmdq_handle_config, CMDQ_EVENT_DISP_WDMA1_EOF);

	dpmgr_path_stop(pgc->dpmgr_handle, ovl2mem_cmdq_enabled());

	/* cmdqRecDumpCommand(pgc->cmdq_handle_config); */

	/*
	 * FIX: propagate the flush status -- the original discarded it and
	 * always returned -1 even on a successful trigger.
	 */
	ret = cmdqRecFlushAsyncCallback(pgc->cmdq_handle_config, ovl2mem_callback,
					atomic_read(&g_trigger_ticket));

	cmdqRecReset(pgc->cmdq_handle_config);

	pgc->need_trigger_path = 0;
	atomic_add(1, &g_trigger_ticket);

	_ovl2mem_path_unlock(__func__);

	dprec_logger_frame_seq_begin(pgc->session,
				     mtkfb_query_frm_seq_by_addr(pgc->session, 0, 0));
	DISPMSG("ovl2mem_trigger ovl2mem_seq %d-seq %d\n", get_ovl2mem_ticket(),
		mtkfb_query_frm_seq_by_addr(pgc->session, 0, 0));

	return ret;
}
Esempio n. 6
0
/*
 * Tear down the ovl2mem path (polling variant): wait briefly for the last
 * submitted configuration to be released, then stop and destroy the dpmgr
 * path, release the CMDQ handle, and reset all path state.
 * Returns 0 on successful teardown, -1 if the path was not initialized.
 */
int ovl2mem_deinit(void)
{
	int ret = -1;
	int loop_cnt = 0;

	DISPFUNC();

	_ovl2mem_path_lock(__func__);

	/* nothing to do if the path was never initialized (or already torn down) */
	if (pgc->state == 0)
		goto Exit;

	/* ovl2mem_wait_done(); */
	ovl2mem_layer_num = 0;
	/*
	 * Poll until the last configuration is released (trigger and release
	 * tickets differ by exactly 1), giving up after ~50 ms so deinit can
	 * never hang.  NOTE(review): for sleeps this short the kernel
	 * recommends usleep_range() over msleep() -- msleep(5) may sleep far
	 * longer than 5 ms.
	 */
	while (((atomic_read(&g_trigger_ticket) - atomic_read(&g_release_ticket)) != 1) &&
	       (loop_cnt < 10)) {
		msleep(5);
		/* wait the last configuration done */
		loop_cnt++;
	}

	dpmgr_path_stop(pgc->dpmgr_handle, CMDQ_DISABLE);
	dpmgr_path_reset(pgc->dpmgr_handle, CMDQ_DISABLE);
	dpmgr_path_deinit(pgc->dpmgr_handle, CMDQ_DISABLE);
	dpmgr_destroy_path(pgc->dpmgr_handle, NULL);

	cmdqRecDestroy(pgc->cmdq_handle_config);

	pgc->dpmgr_handle = NULL;
	pgc->cmdq_handle_config = NULL;
	pgc->state = 0;
	pgc->need_trigger_path = 0;
	atomic_set(&g_trigger_ticket, 1);
	atomic_set(&g_release_ticket, 1);

	/*
	 * FIX: report success -- the original left ret at -1 even after a
	 * fully successful teardown.
	 */
	ret = 0;

 Exit:
	_ovl2mem_path_unlock(__func__);

	DISPMSG("ovl2mem_deinit done\n");
	return ret;
}
Esempio n. 7
0
/*
 * Initialize the ovl2mem path for @session: create the CMDQ record and the
 * dpmgr path, configure the OVL1/WDMA1 M4U ports, and bring the path into
 * the ready state.  Returns 0 on success, non-zero on failure (or -1 when
 * already initialized, preserving the original behavior).
 */
int ovl2mem_init(unsigned int session)
{
	int ret = -1;
	M4U_PORT_STRUCT sPort;

	DISPFUNC();

	dpmgr_init();
	/*
	 * NOTE(review): mutex_init() runs on every init call; re-initializing
	 * a mutex that could be held is unsafe -- confirm ovl2mem_init() is
	 * only ever called on a quiesced path.
	 */
	mutex_init(&(pgc->lock));

	_ovl2mem_path_lock(__func__);

	/* already initialized: nothing to do */
	if (pgc->state > 0)
		goto Exit;

	ret = cmdqRecCreate(CMDQ_SCENARIO_SUB_DISP, &(pgc->cmdq_handle_config));
	if (ret) {
		DISPCHECK("cmdqRecCreate FAIL, ret=%d \n", ret);
		goto Exit;
	}
	DISPCHECK("cmdqRecCreate SUCCESS, cmdq_handle=%p\n", pgc->cmdq_handle_config);

	pgc->dpmgr_handle = dpmgr_create_path(DDP_SCENARIO_SUB_OVL_MEMOUT, pgc->cmdq_handle_config);
	if (pgc->dpmgr_handle) {
		DISPCHECK("dpmgr create path SUCCESS(%p)\n", pgc->dpmgr_handle);
	} else {
		DISPCHECK("dpmgr create path FAIL\n");
		/*
		 * FIX: the original fell through with ret still 0 (from
		 * cmdqRecCreate), reporting success on this failure path.
		 */
		ret = -1;
		goto Exit;
	}

	/* route OVL1 through the M4U (virtual) or bypass it (physical) */
	sPort.ePortID = M4U_PORT_DISP_OVL1;
	sPort.Virtuality = ovl2mem_use_m4u;
	sPort.Security = 0;
	sPort.Distance = 1;
	sPort.Direction = 0;
	ret = m4u_config_port(&sPort);
	/*
	 * FIX: check this result -- the original overwrote it with the WDMA1
	 * port result, so an OVL1 port-config failure went undetected.
	 */
	if (ret != 0) {
		DISPCHECK("config M4U Port %s to %s FAIL(ret=%d)\n",
			  ddp_get_module_name(DISP_MODULE_OVL1),
			  ovl2mem_use_m4u ? "virtual" : "physical", ret);
		goto Exit;
	}

	/* same routing for the WDMA1 write-out port */
	sPort.ePortID = M4U_PORT_DISP_WDMA1;
	sPort.Virtuality = ovl2mem_use_m4u;
	sPort.Security = 0;
	sPort.Distance = 1;
	sPort.Direction = 0;
	ret = m4u_config_port(&sPort);
	if (ret == 0) {
		DISPCHECK("config M4U Port %s to %s SUCCESS\n",
			  ddp_get_module_name(DISP_MODULE_OVL1),
			  ovl2mem_use_m4u ? "virtual" : "physical");
	} else {
		DISPCHECK("config M4U Port %s to %s FAIL(ret=%d)\n",
			  ddp_get_module_name(DISP_MODULE_OVL1),
			  ovl2mem_use_m4u ? "virtual" : "physical", ret);
		goto Exit;
	}

	dpmgr_path_set_video_mode(pgc->dpmgr_handle, ovl2mem_cmdq_enabled());

	dpmgr_path_init(pgc->dpmgr_handle, CMDQ_DISABLE);
	dpmgr_path_reset(pgc->dpmgr_handle, CMDQ_DISABLE);

	dpmgr_enable_event(pgc->dpmgr_handle, DISP_PATH_EVENT_FRAME_COMPLETE);

	pgc->max_layer = 4;
	pgc->state = 1;
	pgc->session = session;
	atomic_set(&g_trigger_ticket, 1);
	atomic_set(&g_release_ticket, 1);

Exit:
	_ovl2mem_path_unlock(__func__);

	DISPMSG("ovl2mem_init done\n");

	return ret;
}