/*
 * md_cd_let_md_go - kick the modem core out of reset via the boot-slave
 * registers.
 *
 * @md: modem instance; md->private_data must point to its md_cd_ctrl.
 * Returns 0 when the boot vector was programmed, -1 if the modem is held
 * by a debugger (MD_IN_DEBUG) and must not be started.
 */
int md_cd_let_md_go(struct ccci_modem *md)
{
	struct md_cd_ctrl *md_ctrl = (struct md_cd_ctrl *)md->private_data;
	/* Boot-slave magic values: unlock key, entry vector, latch/enable key. */
	const unsigned int boot_slave_key = 0x3567C766;
	const unsigned int boot_slave_vector = 0x00000001; /* after remap, MD ROM is at 0 from MD's view; MT6595 uses Thumb code */
	const unsigned int boot_slave_enable = 0xA3B66175;

	if (MD_IN_DEBUG(md))
		return -1;

	CCCI_INF_MSG(md->index, TAG, "set MD boot slave\n");
	/* Set the start address so the modem can run. */
	cldma_write32(md_ctrl->md_boot_slave_Key, 0, boot_slave_key);       /* make boot vector programmable */
	cldma_write32(md_ctrl->md_boot_slave_Vector, 0, boot_slave_vector); /* program the entry point */
	cldma_write32(md_ctrl->md_boot_slave_En, 0, boot_slave_enable);     /* make boot vector take effect */
	return 0;
}
/*
 * ccci_modem_resume - platform resume hook: re-enable CCIF arbitration.
 *
 * @dev: platform device whose platform_data is the ccci_modem instance.
 * Always returns 0.
 */
int ccci_modem_resume(struct platform_device *dev)
{
	struct ccci_modem *md = (struct ccci_modem *)dev->dev.platform_data;
	struct md_cd_ctrl *ctrl = (struct md_cd_ctrl *)md->private_data;

	/* arbitration */
	cldma_write32(ctrl->ap_ccif_base, APCCIF_CON, 0x01);
	return 0;
}
/*
 * md_cd_power_on - power on MD_INFRA/MODEM_TOP and disable the MD watchdog.
 *
 * @md: modem instance; md->private_data must point to its md_cd_ctrl.
 * Returns 0 on success, or the non-zero error from md_power_on().
 *
 * With FEATURE_RF_CLK_BUF the RFIC BSI pins are muxed while holding
 * clk_buf_ctrl_lock; the lock is released before any error return.
 *
 * NOTE(review): a second, extended definition of md_cd_power_on exists later
 * in this file (the CONFIG_MTK_LEGACY/VLTE variant); presumably a build-time
 * guard outside this view selects one of them — confirm before editing either.
 */
int md_cd_power_on(struct ccci_modem *md)
{
	int ret = 0;
	struct md_cd_ctrl *md_ctrl = (struct md_cd_ctrl *)md->private_data;

#ifdef FEATURE_RF_CLK_BUF
	/* config RFICx pins as BSI, serialized by the clock-buffer lock */
	mutex_lock(&clk_buf_ctrl_lock); /* fixme,clkbuf, ->down(&clk_buf_ctrl_lock_2); */
	CCCI_INF_MSG(md->index, TAG, "clock buffer, BSI mode\n");
	mt_set_gpio_mode(GPIO_RFIC0_BSI_CK, GPIO_MODE_01);
	mt_set_gpio_mode(GPIO_RFIC0_BSI_D0, GPIO_MODE_01);
	mt_set_gpio_mode(GPIO_RFIC0_BSI_D1, GPIO_MODE_01);
	mt_set_gpio_mode(GPIO_RFIC0_BSI_D2, GPIO_MODE_01);
	mt_set_gpio_mode(GPIO_RFIC0_BSI_CS, GPIO_MODE_01);
#endif
	/* power on MD_INFRA and MODEM_TOP */
	switch (md->index) {
	case MD_SYS1:
		CCCI_INF_MSG(md->index, TAG, "Call start md_power_on()\n");
		ret = md_power_on(SYS_MD1);
		CCCI_INF_MSG(md->index, TAG, "Call end md_power_on() ret=%d\n", ret);
		break;
	default:
		/* no power sequence needed for other modem indices */
		break;
	}
#ifdef FEATURE_RF_CLK_BUF
	mutex_unlock(&clk_buf_ctrl_lock); /* fixme,clkbuf, ->delete */
#endif
	if (ret)
		return ret;
	/* disable MD WDT so it cannot reset the modem during boot */
	cldma_write32(md_ctrl->md_rgu_base, WDT_MD_MODE, WDT_MD_MODE_KEY);
	return 0;
}
/*
 * ccci_modem_restore_reg - restore CLDMA/CCIF registers after resume.
 *
 * @md: modem instance; md->private_data must point to its md_cd_ctrl.
 *
 * If the modem is GATED/RESET/INVALID there is nothing to restore.
 * Otherwise, when the power-down-domain CLDMA registers were lost across
 * suspend (TQSAR(0) reads 0), re-program the UL checksum mode, the Tx queue
 * start addresses (recovered from the always-on backup copies), restart all
 * Tx queues, and re-enable the L2/L3 CLDMA interrupts — all under
 * cldma_timeout_lock with interrupts disabled.
 */
void ccci_modem_restore_reg(struct ccci_modem *md)
{
	struct md_cd_ctrl *md_ctrl = (struct md_cd_ctrl *)md->private_data;
	int i;
	unsigned long flags;

  if(md->md_state == GATED||md->md_state == RESET||md->md_state == INVALID){
    CCCI_INF_MSG(md->index, TAG, "Resume no need reset cldma for md_state=%d\n",md->md_state);
    return;
  }
	cldma_write32(md_ctrl->ap_ccif_base, APCCIF_CON, 0x01); // arbitration

	/* a non-zero TQSAR(0) means the PDN-domain registers survived suspend */
	if(cldma_read32(md_ctrl->cldma_ap_pdn_base, CLDMA_AP_TQSAR(0)))
	{
		CCCI_INF_MSG(md->index, TAG, "Resume cldma pdn register: No need  ...\n");
	}
	else
	{
		CCCI_INF_MSG(md->index, TAG, "Resume cldma pdn register ...11\n");
    	spin_lock_irqsave(&md_ctrl->cldma_timeout_lock, flags);
    	cldma_write32(md_ctrl->cldma_ap_pdn_base, CLDMA_AP_HPQR, 0x00);
        // set checksum (compile-time CHECKSUM_SIZE selects off / 12-byte / 16-byte mode)
        switch (CHECKSUM_SIZE) {
        case 0:
            cldma_write32(md_ctrl->cldma_ap_pdn_base, CLDMA_AP_UL_CHECKSUM_CHANNEL_ENABLE, 0);
            break;
        case 12:
            // enable checksum on all queues; clear bit4 of UL_CFG for 12-byte mode
            cldma_write32(md_ctrl->cldma_ap_pdn_base, CLDMA_AP_UL_CHECKSUM_CHANNEL_ENABLE, CLDMA_BM_ALL_QUEUE);
            cldma_write32(md_ctrl->cldma_ap_pdn_base, CLDMA_AP_UL_CFG, cldma_read32(md_ctrl->cldma_ap_pdn_base, CLDMA_AP_UL_CFG)&~0x10);
             break;
        case 16:
            // enable checksum on all queues; set bit4 of UL_CFG for 16-byte mode
            cldma_write32(md_ctrl->cldma_ap_pdn_base, CLDMA_AP_UL_CHECKSUM_CHANNEL_ENABLE, CLDMA_BM_ALL_QUEUE);
            cldma_write32(md_ctrl->cldma_ap_pdn_base, CLDMA_AP_UL_CFG, cldma_read32(md_ctrl->cldma_ap_pdn_base, CLDMA_AP_UL_CFG)|0x10);
            break;
        }
        // set start address: prefer the AO-domain backup of the last processed GPD;
        // fall back to the queue's tr_done descriptor address when the backup is 0
        for(i=0; i<QUEUE_LEN(md_ctrl->txq); i++) {
    		if(cldma_read32(md_ctrl->cldma_ap_ao_base, CLDMA_AP_TQCPBAK(md_ctrl->txq[i].index)) == 0){
    			CCCI_INF_MSG(md->index, TAG, "Resume CH(%d) current bak:== 0\n", i);
    			cldma_write32(md_ctrl->cldma_ap_pdn_base, CLDMA_AP_TQSAR(md_ctrl->txq[i].index), md_ctrl->txq[i].tr_done->gpd_addr);
    			cldma_write32(md_ctrl->cldma_ap_ao_base, CLDMA_AP_TQSABAK(md_ctrl->txq[i].index), md_ctrl->txq[i].tr_done->gpd_addr);
    		}
    		else{
    			cldma_write32(md_ctrl->cldma_ap_pdn_base, CLDMA_AP_TQSAR(md_ctrl->txq[i].index), cldma_read32(md_ctrl->cldma_ap_ao_base, CLDMA_AP_TQCPBAK(md_ctrl->txq[i].index)));
    			cldma_write32(md_ctrl->cldma_ap_ao_base, CLDMA_AP_TQSABAK(md_ctrl->txq[i].index), cldma_read32(md_ctrl->cldma_ap_ao_base, CLDMA_AP_TQCPBAK(md_ctrl->txq[i].index)));
    		}
        }
        // write barrier: make sure the address programming above lands before the start command
        wmb();
        // start all Tx and Rx queues
        cldma_write32(md_ctrl->cldma_ap_pdn_base, CLDMA_AP_UL_START_CMD, CLDMA_BM_ALL_QUEUE);
        cldma_read32(md_ctrl->cldma_ap_pdn_base, CLDMA_AP_UL_START_CMD); // dummy read
        md_ctrl->txq_active |= CLDMA_BM_ALL_QUEUE;
        //cldma_write32(md_ctrl->cldma_ap_pdn_base, CLDMA_AP_SO_START_CMD, CLDMA_BM_ALL_QUEUE);
        //cldma_read32(md_ctrl->cldma_ap_pdn_base, CLDMA_AP_SO_START_CMD); // dummy read
        //md_ctrl->rxq_active |= CLDMA_BM_ALL_QUEUE;
        // enable L2 DONE and ERROR interrupts
        cldma_write32(md_ctrl->cldma_ap_pdn_base, CLDMA_AP_L2TIMCR0, CLDMA_BM_INT_DONE|CLDMA_BM_INT_ERROR);
        // enable all L3 interrupts
        cldma_write32(md_ctrl->cldma_ap_pdn_base, CLDMA_AP_L3TIMCR0, CLDMA_BM_INT_ALL);
        cldma_write32(md_ctrl->cldma_ap_pdn_base, CLDMA_AP_L3TIMCR1, CLDMA_BM_INT_ALL);
        cldma_write32(md_ctrl->cldma_ap_pdn_base, CLDMA_AP_L3RIMCR0, CLDMA_BM_INT_ALL);
        cldma_write32(md_ctrl->cldma_ap_pdn_base, CLDMA_AP_L3RIMCR1, CLDMA_BM_INT_ALL);
        spin_unlock_irqrestore(&md_ctrl->cldma_timeout_lock, flags);
        CCCI_INF_MSG(md->index, TAG, "Resume cldma pdn register done\n");
    }
}
/*
 * md_cd_power_on - full modem power-on: VLTE rail, RFIC BSI pin mux,
 * MD_INFRA/MODEM_TOP power (legacy or common-clock framework), PBM
 * notification, and MD watchdog disable.
 *
 * @md: modem instance; md->private_data must point to its md_cd_ctrl.
 * Returns 0 on success, or the non-zero error from md_power_on()
 * (only possible in the CONFIG_MTK_LEGACY path).
 *
 * NOTE(review): this is a second definition of md_cd_power_on in this file
 * (a simpler one appears earlier); presumably a build-time guard outside
 * this chunk selects one of them — confirm before changing either.
 */
int md_cd_power_on(struct ccci_modem *md)
{
    int ret = 0;
    struct md_cd_ctrl *md_ctrl = (struct md_cd_ctrl *)md->private_data;

    /* turn on VLTE rail before powering the modem */
#ifdef FEATURE_VLTE_SUPPORT
    mt_set_gpio_out(GPIO_LTE_VSRAM_EXT_POWER_EN_PIN,1);
    CCCI_INF_MSG(md->index, CORE, "md_cd_power_on:mt_set_gpio_out(GPIO_LTE_VSRAM_EXT_POWER_EN_PIN,1)\n");

    //if(!(mt6325_upmu_get_swcid()==PMIC6325_E1_CID_CODE ||
    //     mt6325_upmu_get_swcid()==PMIC6325_E2_CID_CODE))
    {
    CCCI_INF_MSG(md->index, CORE, "md_cd_power_on:set VLTE on,bit0,1\n");
    pmic_config_interface(0x04D6, 0x1, 0x1, 0); /* bit[0] => 1'b1: enable VLTE */
    udelay(200); /* let the rail settle */
    /*
     * [Notes] moved into the MD CMOS flow because of a hardware issue, so
     * disabled on denlai; bring-up must confirm with MD DE & SPM.
     * (The disabled code below used a reg_value temporary; its now-unused
     * local declaration was removed to avoid an unused-variable warning.)
     */
    //unsigned int reg_value;
    //reg_value = ccci_read32(infra_ao_base,0x338);
    //reg_value &= ~(0x40); //bit[6] =>1'b0
    //ccci_write32(infra_ao_base,0x338,reg_value);
    //CCCI_INF_MSG(md->index, CORE, "md_cd_power_on: set infra_misc VLTE bit(0x1000_0338)=0x%x, bit[6]=0x%x\n",ccci_read32(infra_ao_base,0x338),(ccci_read32(infra_ao_base,0x338)&0x40));
    }
#endif
#ifdef FEATURE_RF_CLK_BUF
    /* config RFICx pins as BSI, serialized by the clock-buffer lock */
    mutex_lock(&clk_buf_ctrl_lock); // fixme,clkbuf, ->down(&clk_buf_ctrl_lock_2);
    CCCI_INF_MSG(md->index, TAG, "clock buffer, BSI ignore mode\n");

    mt_set_gpio_mode(GPIO_RFIC0_BSI_CK,  GPIO_MODE_01);
    mt_set_gpio_mode(GPIO_RFIC0_BSI_D0,  GPIO_MODE_01);
    mt_set_gpio_mode(GPIO_RFIC0_BSI_D1,  GPIO_MODE_01);
    mt_set_gpio_mode(GPIO_RFIC0_BSI_D2,  GPIO_MODE_01);
    mt_set_gpio_mode(GPIO_RFIC0_BSI_CS,  GPIO_MODE_01);
#endif
    /* power on MD_INFRA and MODEM_TOP */
    switch (md->index) {
    case MD_SYS1:
#if defined(CONFIG_MTK_LEGACY)
        CCCI_INF_MSG(md->index, TAG, "Call start md_power_on()\n");
        ret = md_power_on(SYS_MD1);
        CCCI_INF_MSG(md->index, TAG, "Call end md_power_on() ret=%d\n",ret);
#else
        CCCI_INF_MSG(md->index, TAG, "Call start clk_prepare_enable()\n");
        clk_prepare_enable(clk_scp_sys_md1_main);
        CCCI_INF_MSG(md->index, TAG, "Call end clk_prepare_enable()\n");
#endif
        /* notify the power budget manager that MD1 is now on */
        kicker_pbm_by_md(MD1,true);
        CCCI_INF_MSG(md->index, TAG, "Call end kicker_pbm_by_md(0,true)\n");
        break;
    default:
        /* no power sequence needed for other modem indices */
        break;
    }
#ifdef FEATURE_RF_CLK_BUF
    mutex_unlock(&clk_buf_ctrl_lock); // fixme,clkbuf, ->delete
#endif
    if (ret)
        return ret;
    /* disable MD WDT so it cannot reset the modem during boot */
    cldma_write32(md_ctrl->md_rgu_base, WDT_MD_MODE, WDT_MD_MODE_KEY);
    return 0;
}