int mtlte_df_init(void)
{
#if (FORMAL_DL_FLOW_CONTROL || BUFFER_POOL_FOR_EACH_QUE || USE_MULTI_QUE_DISPATCH)
    unsigned int i;
#endif
	KAL_DBGPRINT(KAL, DBG_INFO,("====> %s\n",KAL_FUNC_NAME)) ;	
	
	mtlte_df_buff_threshold_default_setting() ;
    lte_df_core.cb_sw_int = NULL;
    lte_df_core.cb_wd_timeout = NULL;

#if FORMAL_DL_FLOW_CONTROL
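    /* Start every RX queue with flow control disabled and its record cleared. */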
    for (i=0; i<RXQ_NUM ; i++){
        lte_df_core.fl_ctrl_enable[i] = false;
        lte_df_core.fl_ctrl_record[i] = 0;
    }
#endif

    lte_df_core.dl_reload_work_queue = create_singlethread_workqueue("df_dl_reload_work");
	if (!lte_df_core.dl_reload_work_queue) {
		KAL_DBGPRINT(KAL, DBG_ERROR,("[ERR] create the data flow layer DL reload work fail!!\n")) ;
		return -ENOMEM;
	}
	INIT_WORK(&lte_df_core.dl_reload_work, mtlte_df_DL_reload_work);

#if	USE_MULTI_QUE_DISPATCH
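    /* Multi-queue dispatch: every RX queue gets its own single-threaded dispatch
     * workqueue, instead of the one shared DL dispatch workqueue in the #else branch. */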
    for (i=0; i<RXQ_NUM ; i++){
        sprintf(rxq_work_name[i], "rxq%d_dispatch_work", i);
        
        lte_df_core.rxq_dispatch_work_queue[i] = create_singlethread_workqueue(rxq_work_name[i]);
	    if (!lte_df_core.rxq_dispatch_work_queue[i]) {
		    KAL_DBGPRINT(KAL, DBG_ERROR,("[ERR] create the data flow layer rxq%d dispatch work fail!!\n", i)) ;
		    return -ENOMEM;
	    }
	    INIT_WORK(&lte_df_core.rxq_dispatch_work_param[i].rxq_dispatch_work, mtlte_df_DL_dispatch_rxque_work);
        lte_df_core.rxq_dispatch_work_param[i].rxq_num = i;
    }
#else
    lte_df_core.dl_dispatch_work_queue = create_singlethread_workqueue("df_dl_dispatch_work");
	if (!lte_df_core.dl_dispatch_work_queue) {
		KAL_DBGPRINT(KAL, DBG_ERROR,("[ERR] create the data flow layer DL dispatch work fail!!\n")) ;
		return -ENOMEM;
	}
	INIT_WORK(&lte_df_core.dl_dispatch_work, mtlte_df_DL_dispatch_work);
#endif

#if BUFFER_POOL_FOR_EACH_QUE
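    /* Per-queue buffer pool defaults: SKB allocation size and pool depth for each RX queue. */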
    for (i=0; i<RXQ_NUM ; i++){
        lte_df_core.df_skb_alloc_size[i] = DEV_MAX_PKT_SIZE;
        lte_df_core.df_buffer_pool_depth[i] = MT_LTE_DL_BUFF_POOL_TH;
    }
#endif

    KAL_AQUIREMUTEX(&lte_df_core.dl_pkt_lock) ;

	KAL_DBGPRINT(KAL, DBG_INFO,("<==== %s\n",KAL_FUNC_NAME)) ;

	return KAL_SUCCESS ; 
}
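
/*
 * Minimal sketch of the matching teardown, assuming mtlte_df_deinit (called from the
 * error path of mtlte_sys_sdio_driver_init below) only has to release the work queues
 * created in mtlte_df_init. The real driver very likely frees buffer pools and other
 * state as well, so treat this as illustrative rather than the actual implementation;
 * the _sketch suffix marks it as hypothetical.
 */
void mtlte_df_deinit_sketch(void)
{
#if USE_MULTI_QUE_DISPATCH
    unsigned int i;

    /* destroy_workqueue() drains any pending work before freeing the queue. */
    for (i = 0; i < RXQ_NUM; i++) {
        if (lte_df_core.rxq_dispatch_work_queue[i])
            destroy_workqueue(lte_df_core.rxq_dispatch_work_queue[i]);
    }
#else
    if (lte_df_core.dl_dispatch_work_queue)
        destroy_workqueue(lte_df_core.dl_dispatch_work_queue);
#endif

    /* The DL reload workqueue was created first in mtlte_df_init(). */
    if (lte_df_core.dl_reload_work_queue)
        destroy_workqueue(lte_df_core.dl_reload_work_queue);
}
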
//static
//KAL_INT32 __init mtlte_sys_sdio_driver_init(void)
int mtlte_sys_sdio_driver_init(void)
{	
	int ret = KAL_SUCCESS ;
	
    KAL_RAWPRINT(("[INIT] =======> lte_sdio_driver_init\n")); 	

	lte_dev.card_exist = 0 ;
	lte_dev.sdio_func = NULL;
    /* init thread related parameters */
    init_waitqueue_head(&lte_dev.sdio_thread_wq);        

#ifdef MT_LTE_AUTO_CALIBRATION    
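    /* Optional auto-calibration (autok) support is brought up before the HIF layer. */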
    autok_module_init();
#endif
	
	/* init the hif layer */
    if ((ret = mtlte_hif_sdio_init()) != KAL_SUCCESS){
    	KAL_RAWPRINT(("[INIT] XXXXXX lte_sdio_driver_init -mtlte_hif_sdio_init fail \n")); 
		goto HIF_INITFAIL ; 
    }
	/* init the data flow layer */
    if ((ret = mtlte_df_init()) != KAL_SUCCESS){
       	KAL_RAWPRINT(("[INIT] XXXXXX lte_sdio_driver_init -mtlte_df_init fail \n")); 
		goto DF_INITFAIL ; 
    }

    if ((ret = mtlte_expt_init()) != KAL_SUCCESS){
       	KAL_RAWPRINT(("[INIT] XXXXXX lte_sdio_driver_init -mtlte_expt_init fail \n")); 
		goto DF_INITFAIL ; 
    }

#if EMCS_SDIO_DRVTST	
	if ((ret = mtlte_dev_test_drvinit()) != KAL_SUCCESS){
		KAL_RAWPRINT(("[INIT] XXXXXX lte_sdio_driver_init -mtlte_dev_test_drvinit fail \n")); 
		goto TEST_DRV_INITFAIL ; 
    }
#endif	


    if ((ret = sdio_register_driver(&mtlte_driver)) != KAL_SUCCESS){
		KAL_RAWPRINT(("[INIT] XXXXXX lte_sdio_driver_init -sdio_register_driver fail \n")); 
		goto SDIO_REG_FAIL ; 
    }

    KAL_AQUIREMUTEX(&lte_dev.thread_kick_lock) ;

    if ((ret = sdio_onoff_module_init()) != KAL_SUCCESS){
		KAL_RAWPRINT(("[INIT] XXXXXX lte_sdio_driver_init - onoff_char_dev register fail \n")); 
		goto ONOFF_DEV_FAIL ; 
    }
    
	KAL_RAWPRINT(("[INIT] <======= lte_sdio_driver_init\n")); 	
    return ret ;

/* Error unwind: each label below falls through to the next, releasing the
 * layers in the reverse order they were initialized above. */
ONOFF_DEV_FAIL:
    sdio_unregister_driver(&mtlte_driver);
SDIO_REG_FAIL :      	
#if EMCS_SDIO_DRVTST	
	mtlte_dev_test_drvdeinit() ;
TEST_DRV_INITFAIL :
#endif	

	mtlte_df_deinit() ;
DF_INITFAIL :    
	mtlte_hif_sdio_deinit() ;
HIF_INITFAIL :	

	KAL_RAWPRINT(("[INIT FAIL] <======= lte_sdio_driver_init\n")); 	
	return ret ;   
}
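
/*
 * Minimal sketch of a matching module exit path, built only from the teardown helpers
 * that appear in the error ladder above. The driver's real exit function is not shown
 * in this excerpt and may also release the expt layer and the on/off character device,
 * for which no deinit counterparts appear here; the _sketch suffix marks this as
 * hypothetical.
 */
void mtlte_sys_sdio_driver_exit_sketch(void)
{
    KAL_RAWPRINT(("[EXIT] =======> lte_sdio_driver_exit\n"));

    /* Unregister from the SDIO core first so no new probe can race with the teardown. */
    sdio_unregister_driver(&mtlte_driver);

#if EMCS_SDIO_DRVTST
    mtlte_dev_test_drvdeinit();
#endif

    /* Tear down the data flow layer, then the HIF layer, mirroring the init order. */
    mtlte_df_deinit();
    mtlte_hif_sdio_deinit();

    KAL_RAWPRINT(("[EXIT] <======= lte_sdio_driver_exit\n"));
}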