static int _btif_tx_fifo_init(P_MTK_BTIF_INFO_STR p_btif_info)
{
    int i_ret = -1;

    spin_lock_init(&(p_btif_info->tx_fifo_spinlock));

    if (NULL == p_btif_info->p_tx_fifo) {
        p_btif_info->p_tx_fifo = kzalloc(sizeof(struct kfifo),
                                         GFP_ATOMIC);
        if (NULL == p_btif_info->p_tx_fifo) {
            i_ret = -ENOMEM;
            BTIF_ERR_FUNC("kzalloc for p_btif->p_tx_fifo failed\n");
            goto ret;
        }

        i_ret = kfifo_alloc(p_btif_info->p_tx_fifo,
                            BTIF_HAL_TX_FIFO_SIZE, GFP_ATOMIC);
        if (0 != i_ret) {
            BTIF_ERR_FUNC("kfifo_alloc failed, errno(%d)\n", i_ret);
            i_ret = -ENOMEM;
            goto ret;
        }
        i_ret = 0;
    } else {
        BTIF_WARN_FUNC
        ("p_btif_info->p_tx_fifo is already init p_btif_info->p_tx_fifo(0x%x)\n",
         p_btif_info->p_tx_fifo);
        i_ret = 0;
    }
ret:
    return i_ret;
}
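
/*
 * Illustrative teardown sketch (not part of the original source): the init
 * above allocates both the struct kfifo and its internal buffer, so a
 * matching uninit would have to release them in reverse order. The function
 * name below is hypothetical and shown only for completeness.
 */
static void _btif_tx_fifo_uninit(P_MTK_BTIF_INFO_STR p_btif_info)
{
    if (NULL != p_btif_info->p_tx_fifo) {
        /*release the fifo's internal buffer allocated by kfifo_alloc*/
        kfifo_free(p_btif_info->p_tx_fifo);
        /*release the struct kfifo itself allocated by kzalloc*/
        kfree(p_btif_info->p_tx_fifo);
        p_btif_info->p_tx_fifo = NULL;
    }
}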
static void _btif_set_default_setting(void)
{
	struct device_node *node = NULL;
	unsigned int irq_info[3] = {0, 0, 0};
	unsigned int phy_base;
	
	node = of_find_compatible_node(NULL, NULL, "mediatek,BTIF");
	if (node) {
		mtk_btif.p_irq->irq_id = irq_of_parse_and_map(node, 0);
		/* fixme: make compatible with 64-bit arch */
		mtk_btif.base = (unsigned long)of_iomap(node, 0);
		BTIF_INFO_FUNC("get btif irq(%d), register base(0x%lx)\n",
			mtk_btif.p_irq->irq_id, mtk_btif.base);
	} else {
		BTIF_ERR_FUNC("get btif device node fail\n");
	}

	/* get the interrupt line behaviour */
	if (of_property_read_u32_array(node, "interrupts",
			irq_info, ARRAY_SIZE(irq_info))) {
		BTIF_ERR_FUNC("get interrupt flag from DTS fail\n");
	} else {
		mtk_btif.p_irq->irq_flags = irq_info[2];
		BTIF_INFO_FUNC("get interrupt flag(0x%x)\n",
			mtk_btif.p_irq->irq_flags);
	}

	if (of_property_read_u32_index(node, "reg", 0, &phy_base)) {
		BTIF_ERR_FUNC("get register phy base from DTS fail\n");
	} else {
		BTIF_INFO_FUNC("get register phy base(0x%lx)\n",
			(unsigned long)phy_base);
	}
}
/*****************************************************************************
* FUNCTION
*  hal_btif_raise_wak_sig
* DESCRIPTION
*  raise wakeup signal to counterpart
* PARAMETERS
* p_base   [IN]        BTIF module's base address
* RETURNS
*  0 means success, negative means fail
*****************************************************************************/
int hal_btif_raise_wak_sig(P_MTK_BTIF_INFO_STR p_btif)
{
    int i_ret = -1;
    unsigned int base = p_btif->base;
#if MTK_BTIF_ENABLE_CLK_CTL
    if (0 == clock_is_on(MTK_BTIF_CG_BIT)) {
        BTIF_ERR_FUNC("%s: clock is off before send wakeup signal!!!\n",
                      __FILE__);
        return i_ret;
    }
#endif
    /*write 0  to BTIF_WAK to pull ap_wakeup_consyss low */
    BTIF_CLR_BIT(BTIF_WAK(base), BTIF_WAK_BIT);

    /*wait for longer than one 32KHz clock period (~31us); sleep 64-96us here*/
    set_current_state(TASK_UNINTERRUPTIBLE);
    /*per Documentation/timers/timers-howto, usleep_range is the right choice
      for sleeps in the ~10us - 20ms range*/
    usleep_range(64, 96);
    /*write 1 to pull ap_wakeup_consyss high*/
    BTIF_SET_BIT(BTIF_WAK(base), BTIF_WAK_BIT);
    i_ret = 0;
    return i_ret;
}
int btif_tx_dma_ctrl(P_MTK_DMA_INFO_STR p_dma_info, ENUM_DMA_CTRL ctrl_id)
{
	int i_ret = -1;
	unsigned long base = p_dma_info->base;
	unsigned int dat;
	BTIF_TRC_FUNC();
	if (DMA_CTRL_DISABLE == ctrl_id) {
		/*if write 0 to EN bit, DMA will be stopped immediately*/
		/*if write 1 to STOP bit, DMA will be stopped after the current transaction finishes*/
		/*BTIF_CLR_BIT(TX_DMA_EN(base), DMA_EN_BIT);*/
		BTIF_SET_BIT(TX_DMA_STOP(base), DMA_STOP_BIT);
		do {
			dat = BTIF_READ32(TX_DMA_STOP(base));
		} while (0x1 & dat);
		BTIF_DBG_FUNC("BTIF Tx DMA disabled,EN(0x%x),STOP(0x%x)\n",
			BTIF_READ32(TX_DMA_EN(base)), BTIF_READ32(TX_DMA_STOP(base)));
		i_ret = 0;
	} else if (DMA_CTRL_ENABLE == ctrl_id) {
		BTIF_SET_BIT(TX_DMA_EN(base), DMA_EN_BIT);
		BTIF_DBG_FUNC("BTIF Tx DMA enabled\n");
		i_ret = 0;
	} else {
		BTIF_ERR_FUNC("invalid DMA ctrl_id (%d)\n", ctrl_id);
		i_ret = ERR_INVALID_PAR;
	}
	BTIF_TRC_FUNC();
	return i_ret;
}
int btif_rx_dma_ctrl(P_MTK_DMA_INFO_STR p_dma_info, ENUM_DMA_CTRL ctrl_id)
{
	int i_ret = -1;
	unsigned int base = p_dma_info->base;

	BTIF_TRC_FUNC();

	if (DMA_CTRL_DISABLE == ctrl_id) {
/*if write 0 to EN bit, DMA will be stopped immediately*/
/*if write 1 to STOP bit, DMA will be stopped after the current transaction finishes*/
		BTIF_CLR_BIT(RX_DMA_EN(base), DMA_EN_BIT);
		BTIF_DBG_FUNC("BTIF Rx DMA disabled\n");
		i_ret = 0;
	} else if (DMA_CTRL_ENABLE == ctrl_id) {
		BTIF_SET_BIT(RX_DMA_EN(base), DMA_EN_BIT);
		BTIF_DBG_FUNC("BTIF Rx DMA enabled\n");
		i_ret = 0;
	} else {
		BTIF_ERR_FUNC("invalid DMA ctrl_id (%d)\n", ctrl_id);
		i_ret = ERR_INVALID_PAR;
	}
	BTIF_TRC_FUNC();

	return i_ret;
}
/*****************************************************************************
* FUNCTION
*  hal_btif_dump_reg
* DESCRIPTION
*  dump BTIF module's information when needed
* PARAMETERS
* p_base   [IN]        BTIF module's base address
* flag        [IN]        register id flag
* RETURNS
*  0 means success, negative means fail
*****************************************************************************/
int hal_btif_dump_reg(P_MTK_BTIF_INFO_STR p_btif, ENUM_BTIF_REG_ID flag)
{
    /*Chaozhong: to be implemented*/
	int i_ret = -1;
	int idx = 0;
	unsigned long irq_flag = 0;
	unsigned int base = p_btif->base;
	unsigned char reg_map[0xE0/4] = {0};
	unsigned int lsr = 0x0;
	unsigned int dma_en = 0;

	spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);

	if (0 == clock_is_on(MTK_BTIF_CG_BIT)) {
		spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
		BTIF_ERR_FUNC("%s: clock is off, this should never happen!!!\n",
			      __FILE__);
		return i_ret;
	}

	lsr = BTIF_READ32(BTIF_LSR(base));
	dma_en = BTIF_READ32(BTIF_DMA_EN(base));

	for (idx = 0; idx < sizeof(reg_map); idx++)
		reg_map[idx] = BTIF_READ8(p_btif->base + (4 * idx));

	spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
	BTIF_INFO_FUNC("BTIF's clock is on\n");
	BTIF_INFO_FUNC("base address: 0x%x\n", base);
	switch (flag)
	{
		case REG_BTIF_ALL:
#if 0			
			BTIF_INFO_FUNC("BTIF_IER:0x%x\n", BTIF_READ32(BTIF_IER(base)));
			BTIF_INFO_FUNC("BTIF_IIR:0x%x\n", BTIF_READ32(BTIF_IIR(base)));
			BTIF_INFO_FUNC("BTIF_FAKELCR:0x%x\n", BTIF_READ32(BTIF_FAKELCR(base)));
			BTIF_INFO_FUNC("BTIF_LSR:0x%x\n", BTIF_READ32(BTIF_LSR(base)));
			BTIF_INFO_FUNC("BTIF_SLEEP_EN:0x%x\n", BTIF_READ32(BTIF_SLEEP_EN(base)));
			BTIF_INFO_FUNC("BTIF_DMA_EN:0x%x\n", BTIF_READ32(BTIF_DMA_EN(base)));
			BTIF_INFO_FUNC("BTIF_RTOCNT:0x%x\n", BTIF_READ32(BTIF_RTOCNT(base)));
			BTIF_INFO_FUNC("BTIF_TRI_LVL:0x%x\n", BTIF_READ32(BTIF_TRI_LVL(base)));
			BTIF_INFO_FUNC("BTIF_WAT_TIME:0x%x\n", BTIF_READ32(BTIF_WAT_TIME(base)));
			BTIF_INFO_FUNC("BTIF_HANDSHAKE:0x%x\n", BTIF_READ32(BTIF_HANDSHAKE(base)));
#endif
			btif_dump_array("BTIF register", reg_map, sizeof (reg_map));
			break;
		default:
			break;
	}

	BTIF_INFO_FUNC("Tx DMA %s\n", (dma_en & BTIF_DMA_EN_TX) ? "enabled" : "disabled");
	BTIF_INFO_FUNC("Rx DMA %s\n", (dma_en & BTIF_DMA_EN_RX) ? "enabled" : "disabled");

	BTIF_INFO_FUNC("Rx data is %s\n", (lsr & BTIF_LSR_DR_BIT) ? "not empty" : "empty");
	BTIF_INFO_FUNC("Tx data is %s\n", (lsr & BTIF_LSR_TEMT_BIT) ? "empty" : "not empty");
	

    /*register dump finished successfully*/
    i_ret = 0;

    return i_ret;
}
/*****************************************************************************
* FUNCTION
*  hal_btif_irq_handler
* DESCRIPTION
*  lower level interrupt handler
* PARAMETERS
* p_base   [IN]        BTIF module's base address
* p_buf     [IN/OUT] pointer to rx data buffer
* max_len  [IN]        max length of rx buffer
* RETURNS
*  0 means success; negative means fail; positive means rx data length
*****************************************************************************/
int hal_btif_irq_handler(P_MTK_BTIF_INFO_STR p_btif,
                         unsigned char *p_buf, const unsigned int max_len)
{
    /*Chaozhong: to be implemented*/
    int i_ret = -1;
    unsigned int iir = 0;
    unsigned int rx_len = 0;
    unsigned int base = p_btif->base;
    unsigned long irq_flag = 0;

#if 0
    /*check parameter valid or not*/
    if ((NULL == p_buf) || (max_len == 0)) {
        i_ret = ERR_INVALID_PAR;
        return i_ret;
    }
#endif
    spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);
#if MTK_BTIF_ENABLE_CLK_CTL
    if (0 == clock_is_on(MTK_BTIF_CG_BIT)) {
        spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
        BTIF_ERR_FUNC("%s: clock is off before irq handle done!!!\n",
                      __FILE__);
        return i_ret;
    }
#endif
    /*read interrupt identifier register*/
    iir = BTIF_READ32(BTIF_IIR(base));

    /*is rx interrupt exist?*/
#if 0
    while ((iir & BTIF_IIR_RX) && (rx_len < max_len)) {
        rx_len +=
            btif_rx_irq_handler(p_btif, (p_buf + rx_len),
                                (max_len - rx_len));

        /*update IIR*/
        iir = BTIF_READ32(BTIF_IIR(base));
    }
#endif

    while (iir & (BTIF_IIR_RX | BTIF_IIR_RX_TIMEOUT)) {
        rx_len += btif_rx_irq_handler(p_btif, p_buf, max_len);

        /*update IIR*/
        iir = BTIF_READ32(BTIF_IIR(base));
    }

    /*is tx interrupt exist?*/
    if (iir & BTIF_IIR_TX_EMPTY) {
        i_ret = btif_tx_irq_handler(p_btif);
    }
    spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
    i_ret = rx_len != 0 ? rx_len : i_ret;
    return i_ret;
}
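
/*
 * Hedged sketch (not from the original source) of how a top-level ISR could
 * wrap hal_btif_irq_handler; btif_irq_isr, g_btif_rx_buf and the buffer size
 * are assumed names/values used only for illustration. dev_id is expected to
 * carry the P_MTK_BTIF_INFO_STR passed when the IRQ was registered.
 */
static unsigned char g_btif_rx_buf[1024];

static irqreturn_t btif_irq_isr(int irq, void *dev_id)
{
    P_MTK_BTIF_INFO_STR p_btif = (P_MTK_BTIF_INFO_STR)dev_id;
    int ret;

    /*returns rx length when data was read, 0 or negative otherwise*/
    ret = hal_btif_irq_handler(p_btif, g_btif_rx_buf, sizeof(g_btif_rx_buf));
    if (ret > 0) {
        /*hand g_btif_rx_buf[0..ret) to the upper layer here*/
    }
    return IRQ_HANDLED;
}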
static int _tx_dma_flush(P_MTK_DMA_INFO_STR p_dma_info)
{
	int i_ret = -1;
	unsigned long base = p_dma_info->base;
	unsigned int stop = BTIF_READ32(TX_DMA_STOP(base));

/*in MTK DMA BTIF channel we cannot set STOP and FLUSH bit at the same time*/
	if (0 != (stop & DMA_STOP_BIT)) {
		BTIF_ERR_FUNC("BTIF's DMA in stop state, omit flush operation\n");
	} else {
		BTIF_DBG_FUNC("flush tx dma\n");
		BTIF_SET_BIT(TX_DMA_FLUSH(base), DMA_FLUSH_BIT);
		i_ret = 0;
	}
	return i_ret;
}
/*****************************************************************************
* FUNCTION
*  hal_btif_dma_ctrl
* DESCRIPTION
*  enable/disable BTIF's Tx or Rx DMA channel
* PARAMETERS
* p_dma_info   [IN]        pointer to BTIF dma channel's information
* ctrl_id          [IN]        enable/disable ID
* RETURNS
*  0 means success; negative means fail
*****************************************************************************/
int hal_btif_dma_ctrl(P_MTK_DMA_INFO_STR p_dma_info, ENUM_DMA_CTRL ctrl_id)
{
	int i_ret = -1;
	ENUM_DMA_DIR dir = p_dma_info->dir;

	if (DMA_DIR_RX == dir)
		i_ret = btif_rx_dma_ctrl(p_dma_info, ctrl_id);
	else if (DMA_DIR_TX == dir)
		i_ret = btif_tx_dma_ctrl(p_dma_info, ctrl_id);
	else {
		BTIF_ERR_FUNC("invalid DMA dir (%d)\n", dir);
		i_ret = ERR_INVALID_PAR;
	}
	}
	return i_ret;
}
/*****************************************************************************
* FUNCTION
*  hal_btif_info_get
* DESCRIPTION
*  get BTIF's information, including base address and IRQ-related information
* PARAMETERS
* RETURNS
*  pointer to BTIF's information structure
*****************************************************************************/
P_MTK_BTIF_INFO_STR hal_btif_info_get(void)
{
#if NEW_TX_HANDLING_SUPPORT
    int i_ret = 0;
    /*tx fifo and fifo lock init*/
    i_ret = _btif_tx_fifo_init(&mtk_btif);
    if (0 == i_ret) {
        BTIF_INFO_FUNC("_btif_tx_fifo_init succeed\n");
    } else {
        BTIF_ERR_FUNC("_btif_tx_fifo_init failed, i_ret:%d\n", i_ret);
    }
#endif
    spin_lock_init(&g_clk_cg_spinlock);

    return &mtk_btif;
}
/*****************************************************************************
* FUNCTION
*  hal_btif_dma_ier_ctrl
* DESCRIPTION
*  enable/disable BTIF DMA channel's interrupt
* PARAMETERS
* p_dma_info   [IN]        pointer to BTIF dma channel's information
* en          [IN]        true to enable the DMA interrupt, false to disable it
* RETURNS
*  0 means success, negative means fail
*****************************************************************************/
int hal_btif_dma_ier_ctrl(P_MTK_DMA_INFO_STR p_dma_info, bool en)
{
	int i_ret = -1;
	ENUM_DMA_DIR dir = p_dma_info->dir;

	if (DMA_DIR_RX == dir) {
		i_ret = btif_rx_dma_ier_ctrl(p_dma_info, en);
	} else if (DMA_DIR_TX == dir) {
		i_ret = btif_tx_dma_ier_ctrl(p_dma_info, en);
	} else {
		BTIF_ERR_FUNC("invalid DMA dir (%d)\n", dir);
		i_ret = ERR_INVALID_PAR;
	}

	return i_ret;
}
/*****************************************************************************
* FUNCTION
*  hal_btif_dma_info_get
* DESCRIPTION
*  get btif dma channel's information for the given direction
* PARAMETERS
* dma_dir        [IN]         DMA's direction
* RETURNS
*  pointer to btif dma's information structure
*****************************************************************************/
P_MTK_DMA_INFO_STR hal_btif_dma_info_get(ENUM_DMA_DIR dma_dir)
{
	P_MTK_DMA_INFO_STR p_dma_info = NULL;

	BTIF_TRC_FUNC();
	if (DMA_DIR_RX == dma_dir) {
/*Rx DMA*/
		p_dma_info = &mtk_btif_rx_dma;
	} else if (DMA_DIR_TX == dma_dir) {
/*Tx DMA*/
		p_dma_info = &mtk_btif_tx_dma;
	} else {
/*print error log*/
		BTIF_ERR_FUNC("invalid DMA dir (%d)\n", dma_dir);
	}
	spin_lock_init(&g_clk_cg_spinlock);
	BTIF_TRC_FUNC();
	return p_dma_info;
}
/*****************************************************************************
* FUNCTION
*  hal_btif_dma_info_get
* DESCRIPTION
*  get btif dma channel's information for the given direction
* PARAMETERS
* dma_dir        [IN]         DMA's direction
* RETURNS
*  pointer to btif dma's information structure
*****************************************************************************/
P_MTK_DMA_INFO_STR hal_btif_dma_info_get(ENUM_DMA_DIR dma_dir)
{
	P_MTK_DMA_INFO_STR p_dma_info = NULL;

	BTIF_TRC_FUNC();
#ifdef CONFIG_OF
	hal_dma_set_default_setting(dma_dir);
#endif
	if (DMA_DIR_RX == dma_dir)
		/*Rx DMA*/
		p_dma_info = &mtk_btif_rx_dma;
	else if (DMA_DIR_TX == dma_dir)
		/*Tx DMA*/
		p_dma_info = &mtk_btif_tx_dma;
	else
		/*print error log*/
		BTIF_ERR_FUNC("invalid DMA dir (%d)\n", dma_dir);
	spin_lock_init(&g_clk_cg_spinlock);
	BTIF_TRC_FUNC();
	return p_dma_info;
}
/*****************************************************************************
* FUNCTION
*  hal_btif_dma_info_get
* DESCRIPTION
*  get btif dma channel's information for the given direction
* PARAMETERS
* dma_dir        [IN]         DMA's direction
* RETURNS
*  pointer to btif dma's information structure
*****************************************************************************/
P_MTK_DMA_INFO_STR hal_btif_dma_info_get(ENUM_DMA_DIR dma_dir)
{
	P_MTK_DMA_INFO_STR p_dma_info = NULL;

	BTIF_TRC_FUNC();
	if (DMA_DIR_RX == dma_dir) {
/*Rx DMA*/
		p_dma_info = &mtk_btif_rx_dma;
	} else if (DMA_DIR_TX == dma_dir) {
/*Tx DMA*/
		p_dma_info = &mtk_btif_tx_dma;
	} else {
/*print error log*/
		BTIF_ERR_FUNC("invalid DMA dir (%d)\n", dma_dir);
	}
	spin_lock_init(&g_clk_cg_spinlock);
	/*dummy call to prevent build warning*/
	hal_dma_receive_data(NULL, NULL, 0);
	BTIF_TRC_FUNC();
	return p_dma_info;
}
/*****************************************************************************
* FUNCTION
*  hal_dma_send_data
* DESCRIPTION
*  send data through btif in DMA mode
* PARAMETERS
* p_dma_info   [IN]        pointer to BTIF dma channel's information
* p_buf     [IN]        pointer to tx data buffer
* buf_len  [IN]        tx buffer length
* RETURNS
*  positive means number of bytes sent; 0 means tx DMA flush is in progress; negative means fail
*****************************************************************************/
int hal_dma_send_data(P_MTK_DMA_INFO_STR p_dma_info,
		      const unsigned char *p_buf, const unsigned int buf_len)
{
	int i_ret = -1;
	unsigned int base = p_dma_info->base;
	P_DMA_VFIFO p_vfifo = p_dma_info->p_vfifo;
	unsigned int len_to_send = buf_len;
	unsigned int ava_len = 0;
	unsigned int wpt = 0;
	unsigned int last_wpt_wrap = 0;
	unsigned int vff_size = 0;
	unsigned char *p_data = (unsigned char *)p_buf;
	P_MTK_BTIF_DMA_VFIFO p_mtk_vfifo = container_of(p_vfifo,
							MTK_BTIF_DMA_VFIFO,
							vfifo);

	BTIF_TRC_FUNC();
	if ((NULL == p_buf) || (0 == buf_len)) {
		i_ret = ERR_INVALID_PAR;
		BTIF_ERR_FUNC("invalid parameters, p_buf:0x%08x, buf_len:%d\n",
			      p_buf, buf_len);
		return i_ret;
	}
/*check if tx dma in flush operation? if yes, should wait until DMA finish flush operation*/
/*currently uplayer logic will make sure this pre-condition*/
/*disable Tx IER, in case Tx irq happens, flush bit may be set in irq handler*/
	btif_tx_dma_ier_ctrl(p_dma_info, false);

	vff_size = p_mtk_vfifo->vfifo.vfifo_size;
	ava_len = BTIF_READ32(TX_DMA_VFF_LEFT_SIZE(base));
	wpt = BTIF_READ32(TX_DMA_VFF_WPT(base)) & DMA_WPT_MASK;
	last_wpt_wrap = BTIF_READ32(TX_DMA_VFF_WPT(base)) & DMA_WPT_WRAP;

/*copy data to vFIFO. Note: ava_len should always be larger than buf_len, otherwise the common logic layer will not call hal_dma_send_data*/
	if (buf_len > ava_len) {
		BTIF_ERR_FUNC
		    ("length to send(%d) > length available(%d), abnormal!!!---!!!\n",
		     buf_len, ava_len);
		BUG_ON(buf_len > ava_len);	/* this will cause kernel panic */
	}

	len_to_send = buf_len < ava_len ? buf_len : ava_len;
	if (len_to_send + wpt >= vff_size) {
		unsigned int tail_len = vff_size - wpt;
		memcpy((p_mtk_vfifo->vfifo.p_vir_addr + wpt), p_data, tail_len);
		p_data += tail_len;
		memcpy(p_mtk_vfifo->vfifo.p_vir_addr,
		       p_data, len_to_send - tail_len);
/*make sure all data write to memory area tx vfifo locates*/
		dsb();

/*calculate WPT*/
		wpt = wpt + len_to_send - vff_size;
		last_wpt_wrap ^= DMA_WPT_WRAP;
	} else {
		memcpy((p_mtk_vfifo->vfifo.p_vir_addr + wpt),
		       p_data, len_to_send);
/*make sure all data write to memory area tx vfifo locates*/
		dsb();

/*calculate WPT*/
		wpt += len_to_send;
	}
	p_mtk_vfifo->wpt = wpt;
	p_mtk_vfifo->last_wpt_wrap = last_wpt_wrap;

/*make sure tx dma is allowed(tx flush bit is not set) to use before update WPT*/
	if (hal_dma_is_tx_allow(p_dma_info)) {
/*make sure tx dma enabled*/
		hal_btif_dma_ctrl(p_dma_info, DMA_CTRL_ENABLE);

/*update WTP to Tx DMA controller's control register*/
		btif_reg_sync_writel(wpt | last_wpt_wrap, TX_DMA_VFF_WPT(base));

		if ((8 > BTIF_READ32(TX_DMA_VFF_VALID_SIZE(base))) &&
		    (0 < BTIF_READ32(TX_DMA_VFF_VALID_SIZE(base)))) {
/*if 0 < valid size in Tx vFIFO < 8 (and no Tx flush is in progress, which the check above guarantees), set the flush bit to DMA*/
			_tx_dma_flush(p_dma_info);
		}
		i_ret = len_to_send;
	} else {
		BTIF_ERR_FUNC
		    ("Tx DMA flush operation is in process, this case should never happen, please check if tx operation is allowed before call this API\n");
/*if flush operation is in process , we will return 0*/
		i_ret = 0;
	}

/*Enable Tx IER*/
	btif_tx_dma_ier_ctrl(p_dma_info, true);

	BTIF_TRC_FUNC();
	return i_ret;
}
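
/*
 * Worked example of the WPT wrap arithmetic used in hal_dma_send_data
 * (illustrative only, not part of the original source). With
 * vff_size = 2048, wpt = 2000 and len_to_send = 100: tail_len = 48 bytes are
 * copied to the end of the vFIFO, the remaining 52 bytes to its start, the
 * new wpt becomes 2000 + 100 - 2048 = 52, and the wrap flag is toggled so the
 * DMA controller can distinguish a full ring from an empty one. The helper
 * below captures just that computation; its name is hypothetical.
 */
static unsigned int _calc_tx_wpt_sketch(unsigned int wpt, unsigned int len,
					unsigned int vff_size,
					unsigned int *p_wrap_flag)
{
	if (wpt + len >= vff_size) {
		/*write pointer wrapped once: toggle the wrap flag*/
		*p_wrap_flag ^= DMA_WPT_WRAP;
		return wpt + len - vff_size;
	}
	return wpt + len;
}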
/*****************************************************************************
* FUNCTION
*  hal_btif_clk_ctrl
* DESCRIPTION
*  control clock output enable/disable of BTIF module
* PARAMETERS
* p_base   [IN]        BTIF module's base address
* RETURNS
*  0 means success, negative means fail
*****************************************************************************/
int hal_btif_clk_ctrl(P_MTK_BTIF_INFO_STR p_btif, ENUM_CLOCK_CTRL flag)
{
    /*In MTK BTIF, there's only one global CG on AP_DMA, no sub channel's CG bit*/
    /*according to Artis's comment, clock of DMA and BTIF is default off, so we assume it to be off by default*/
    int i_ret = 0;
    unsigned long irq_flag = 0;

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER
    static atomic_t s_clk_ref = ATOMIC_INIT(0);
#else
    static ENUM_CLOCK_CTRL status = CLK_OUT_DISABLE;
#endif
    spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);

#if MTK_BTIF_ENABLE_CLK_CTL

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER

    if (CLK_OUT_ENABLE == flag) {
        if (1 == atomic_inc_return(&s_clk_ref)) {
            i_ret = enable_clock(MTK_BTIF_CG_BIT, BTIF_USER_ID);
            if (i_ret) {
                BTIF_WARN_FUNC
                ("enable_clock for MTK_BTIF_CG_BIT failed, ret:%d",
                 i_ret);
            }
        }
    } else if (CLK_OUT_DISABLE == flag) {
        if (0 == atomic_dec_return(&s_clk_ref)) {
            i_ret = disable_clock(MTK_BTIF_CG_BIT, BTIF_USER_ID);
            if (i_ret) {
                BTIF_WARN_FUNC
                ("disable_clock for MTK_BTIF_CG_BIT failed, ret:%d",
                 i_ret);
            }
        }
    } else {
        i_ret = ERR_INVALID_PAR;
        BTIF_ERR_FUNC("invalid	clock ctrl flag (%d)\n", flag);
    }

#else

    if (status == flag) {
        i_ret = 0;
        BTIF_DBG_FUNC("btif clock already %s\n",
                      CLK_OUT_ENABLE ==
                      status ? "enabled" : "disabled");
    } else {
        if (CLK_OUT_ENABLE == flag) {
            i_ret = enable_clock(MTK_BTIF_CG_BIT, BTIF_USER_ID);
            status = (0 == i_ret) ? flag : status;
            if (i_ret) {
                BTIF_WARN_FUNC
                ("enable_clock for MTK_BTIF_CG_BIT failed, ret:%d",
                 i_ret);
            }
        } else if (CLK_OUT_DISABLE == flag) {
            i_ret = disable_clock(MTK_BTIF_CG_BIT, BTIF_USER_ID);
            status = (0 == i_ret) ? flag : status;
            if (i_ret) {
                BTIF_WARN_FUNC
                ("disable_clock for MTK_BTIF_CG_BIT failed, ret:%d",
                 i_ret);
            }
        } else {
            i_ret = ERR_INVALID_PAR;
            BTIF_ERR_FUNC("invalid	clock ctrl flag (%d)\n", flag);
        }
    }
#endif

#else

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER

#else

    status = flag;
#endif

    i_ret = 0;
#endif

    spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER
    if (0 == i_ret) {
        BTIF_DBG_FUNC("btif clock %s\n",
                      CLK_OUT_ENABLE == flag ? "enabled" : "disabled");
    } else {
        BTIF_ERR_FUNC("%s btif clock failed, ret(%d)\n",
                      CLK_OUT_ENABLE == flag ? "enable" : "disable",
                      i_ret);
    }
#else

    if (0 == i_ret) {
        BTIF_DBG_FUNC("btif clock %s\n",
                      CLK_OUT_ENABLE == flag ? "enabled" : "disabled");
    } else {
        BTIF_ERR_FUNC("%s btif clock failed, ret(%d)\n",
                      CLK_OUT_ENABLE == flag ? "enable" : "disable",
                      i_ret);
    }
#endif
#if MTK_BTIF_ENABLE_CLK_CTL
    BTIF_DBG_FUNC("BTIF's clock is %s\n",
                  (0 == clock_is_on(MTK_BTIF_CG_BIT)) ? "off" : "on");
#endif

    return i_ret;
}
/*****************************************************************************
* FUNCTION
*  hal_btif_dma_clk_ctrl
* DESCRIPTION
*  control clock output enable/disable of DMA module
* PARAMETERS
* p_dma_info   [IN]        pointer to BTIF dma channel's information
* RETURNS
*  0 means success, negative means fail
*****************************************************************************/
int hal_btif_dma_clk_ctrl(P_MTK_DMA_INFO_STR p_dma_info, ENUM_CLOCK_CTRL flag)
{
/*In MTK DMA BTIF channel, there's only one global CG on AP_DMA, no sub channel's CG bit*/
/*according to Artis's comment, clock of DMA and BTIF is default off, so we assume it to be off by default*/
	int i_ret = 0;
	unsigned long irq_flag = 0;

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER
	static atomic_t s_clk_ref = ATOMIC_INIT(0);
#else
	static ENUM_CLOCK_CTRL status = CLK_OUT_DISABLE;
#endif

#if defined(CONFIG_MTK_LEGACY)
	spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);
#endif

#if MTK_BTIF_ENABLE_CLK_CTL

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER

	if (CLK_OUT_ENABLE == flag) {
		if (1 == atomic_inc_return(&s_clk_ref)) {
#if defined(CONFIG_MTK_LEGACY)
			i_ret =
			    enable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("enable_clock for MTK_BTIF_APDMA_CLK_CG failed, ret:%d",
				     i_ret);
			}
#else
			clk_prepare(clk_btif_apdma);
			spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);
			clk_enable(clk_btif_apdma);
			spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
			BTIF_INFO_FUNC("[CCF]enable clk_btif_apdma\n");
#endif /* defined(CONFIG_MTK_LEGACY) */
		}
	} else if (CLK_OUT_DISABLE == flag) {
		if (0 == atomic_dec_return(&s_clk_ref)) {
#if defined(CONFIG_MTK_LEGACY)
			i_ret =
			    disable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("disable_clock for MTK_BTIF_APDMA_CLK_CG failed, ret:%d",
				     i_ret);
			}
#else
			spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);
			clk_disable(clk_btif_apdma);
			spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
			clk_unprepare(clk_btif_apdma);
			BTIF_INFO_FUNC("[CCF] clk_disable_unprepare(clk_btif_apdma) calling\n");
#endif /* defined(CONFIG_MTK_LEGACY) */

		}
	} else {
		i_ret = ERR_INVALID_PAR;
		BTIF_ERR_FUNC("invalid  clock ctrl flag (%d)\n", flag);
	}

#else

	if (status == flag) {
		i_ret = 0;
		BTIF_DBG_FUNC("dma clock already %s\n",
			      CLK_OUT_ENABLE ==
			      status ? "enabled" : "disabled");
	} else {
		if (CLK_OUT_ENABLE == flag) {
#if defined(CONFIG_MTK_LEGACY)
			i_ret =
			    enable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
			status = (0 == i_ret) ? flag : status;
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("enable_clock for MTK_BTIF_APDMA_CLK_CG failed, ret:%d",
				     i_ret);
			}
#else
			clk_prepare(clk_btif_apdma);
			spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);
			clk_enable(clk_btif_apdma);
			spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
			BTIF_INFO_FUNC("[CCF]enable clk_btif_apdma\n");
#endif /* defined(CONFIG_MTK_LEGACY) */

		} else if (CLK_OUT_DISABLE == flag) {
#if defined(CONFIG_MTK_LEGACY)
			i_ret =
			    disable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
			status = (0 == i_ret) ? flag : status;
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("disable_clock for MTK_BTIF_APDMA_CLK_CG failed, ret:%d",
				     i_ret);
			}
#else
			spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);
			clk_disable(clk_btif_apdma);
			spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
			clk_unprepare(clk_btif_apdma);
			BTIF_INFO_FUNC("[CCF] clk_disable_unprepare(clk_btif_apdma) calling\n");
#endif /* defined(CONFIG_MTK_LEGACY) */

		} else {
			i_ret = ERR_INVALID_PAR;
			BTIF_ERR_FUNC("invalid  clock ctrl flag (%d)\n", flag);
		}
	}
#endif

#else

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER

#else

	status = flag;
#endif

	i_ret = 0;
#endif

#if defined(CONFIG_MTK_LEGACY)
	spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
#endif

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER
	if (0 == i_ret) {
		BTIF_DBG_FUNC("dma clock %s\n",
			      CLK_OUT_ENABLE == flag ? "enabled" : "disabled");
	} else {
		BTIF_ERR_FUNC("%s dma clock failed, ret(%d)\n",
			      CLK_OUT_ENABLE == flag ? "enable" : "disable",
			      i_ret);
	}
#else

	if (0 == i_ret) {
		BTIF_DBG_FUNC("dma clock %s\n",
			      CLK_OUT_ENABLE == flag ? "enabled" : "disabled");
	} else {
		BTIF_ERR_FUNC("%s dma clock failed, ret(%d)\n",
			      CLK_OUT_ENABLE == flag ? "enable" : "disable",
			      i_ret);
	}
#endif
#if defined(CONFIG_MTK_LEGACY)
	BTIF_DBG_FUNC("DMA's clock is %s\n",
		      (0 == clock_is_on(MTK_BTIF_APDMA_CLK_CG)) ? "off" : "on");
#endif
	return i_ret;
}
/*****************************************************************************
* FUNCTION
*  hal_btif_send_data
* DESCRIPTION
*  send data through btif in FIFO mode
* PARAMETERS
* p_base   [IN]        BTIF module's base address
* p_buf     [IN]        pointer to tx data buffer
* buf_len  [IN]        tx buffer length
* RETURNS
*   positive means number of data sent; 0 means no data put to FIFO; negative means error happens
*****************************************************************************/
int hal_btif_send_data(P_MTK_BTIF_INFO_STR p_btif,
                       const unsigned char *p_buf, const unsigned int buf_len)
{
    /*Chaozhong: to be implemented*/
    int i_ret = -1;

    unsigned int ava_len = 0;
    unsigned int sent_len = 0;

#if !(NEW_TX_HANDLING_SUPPORT)
    unsigned int base = p_btif->base;
    unsigned int lsr = 0;
    unsigned int left_len = 0;
    unsigned char *p_data = (unsigned char *)p_buf;
#endif

    /*check parameter valid or not*/
    if ((NULL == p_buf) || (buf_len == 0)) {
        i_ret = ERR_INVALID_PAR;
        return i_ret;
    }
#if NEW_TX_HANDLING_SUPPORT
    ava_len = _get_btif_tx_fifo_room(p_btif);
    sent_len = buf_len <= ava_len ? buf_len : ava_len;
    if (0 < sent_len) {
        int enqueue_len = 0;
        unsigned long flag = 0;
        spin_lock_irqsave(&(p_btif->tx_fifo_spinlock), flag);
        enqueue_len = kfifo_in(p_btif->p_tx_fifo,
                               (unsigned char *)p_buf, sent_len);
        if (sent_len != enqueue_len) {
            BTIF_ERR_FUNC("target tx len:%d, len sent:%d\n",
                          sent_len, enqueue_len);
        }
        i_ret = enqueue_len;
        dsb();
        /*enable BTIF Tx IRQ*/
        hal_btif_tx_ier_ctrl(p_btif, true);
        spin_unlock_irqrestore(&(p_btif->tx_fifo_spinlock), flag);
        BTIF_DBG_FUNC("enqueue len:%d\n", enqueue_len);
    } else {
        i_ret = 0;
    }
#else
    while ((_btif_is_tx_allow(p_btif)) && (sent_len < buf_len)) {
        /*read LSR and check THRE or TEMT; either one set means BTIF can accept tx data*/
        lsr = BTIF_READ32(BTIF_LSR(base));

        if (lsr & BTIF_LSR_TEMT_BIT) {
            /*Tx FIFO is empty, so we can write up to the full Tx FIFO size to BTIF*/
            ava_len = BTIF_TX_FIFO_SIZE;
        } else if (lsr & BTIF_LSR_THRE_BIT) {
            /*Tx FIFO is below the Tx threshold, so we can write (Tx FIFO size - Tx threshold) bytes to BTIF*/
            ava_len = BTIF_TX_FIFO_SIZE - BTIF_TX_FIFO_THRE;
        } else {
            /*data in the tx FIFO already exceeds the Tx threshold, do not write to THR*/
            ava_len = 0;
            break;
        }

        left_len = buf_len - sent_len;
        /*ava_len is the real length that will be written to BTIF THR*/
        ava_len = ava_len > left_len ? left_len : ava_len;
        /*update the sent length after this operation*/
        sent_len += ava_len;
        /*no memory barrier needed here: there is no memory ordering issue,
          the CPU guarantees these byte writes are issued in program order
        */
        while (ava_len--)
            btif_reg_sync_writeb(*(p_data++), BTIF_THR(base));

    }
    /* while ((hal_btif_is_tx_allow()) && (sent_len < buf_len)); */

    i_ret = sent_len;

    /*enable BTIF Tx IRQ*/
    hal_btif_tx_ier_ctrl(p_btif, true);
#endif
    return i_ret;
}
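
/*
 * Hedged sketch (not in the original source) of the consumer side that the
 * NEW_TX_HANDLING_SUPPORT path above relies on: the BTIF Tx interrupt is
 * expected to drain p_tx_fifo into the Tx holding register. Local names are
 * assumptions; only APIs already used in this file appear.
 */
static void _btif_tx_fifo_drain_sketch(P_MTK_BTIF_INFO_STR p_btif)
{
    unsigned int base = p_btif->base;
    unsigned char byte = 0;
    unsigned long flag = 0;

    spin_lock_irqsave(&(p_btif->tx_fifo_spinlock), flag);
    /*pop bytes while BTIF can still accept tx data and the fifo is not empty*/
    while ((BTIF_READ32(BTIF_LSR(base)) & BTIF_LSR_THRE_BIT) &&
           (1 == kfifo_out(p_btif->p_tx_fifo, &byte, 1)))
        btif_reg_sync_writeb(byte, BTIF_THR(base));
    spin_unlock_irqrestore(&(p_btif->tx_fifo_spinlock), flag);
}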
/*****************************************************************************
* FUNCTION
*  hal_rx_dma_irq_handler
* DESCRIPTION
*  lower level rx interrupt handler
* PARAMETERS
* p_dma_info   [IN]        pointer to BTIF dma channel's information
* p_buf     [IN/OUT] pointer to rx data buffer
* max_len  [IN]        max length of rx buffer
* RETURNS
*  0 or positive means number of bytes received; negative means fail
*****************************************************************************/
int hal_rx_dma_irq_handler(P_MTK_DMA_INFO_STR p_dma_info,
			   unsigned char *p_buf, const unsigned int max_len)
{
	int i_ret = -1;
	unsigned int valid_len = 0;
	unsigned int wpt_wrap = 0;
	unsigned int rpt_wrap = 0;
	unsigned int wpt = 0;
	unsigned int rpt = 0;
	unsigned int tail_len = 0;
	unsigned int real_len = 0;
	unsigned int base = p_dma_info->base;
	P_DMA_VFIFO p_vfifo = p_dma_info->p_vfifo;
	dma_rx_buf_write rx_cb = p_dma_info->rx_cb;
	unsigned char *p_vff_buf = NULL;
	unsigned char *vff_base = p_vfifo->p_vir_addr;
	unsigned int vff_size = p_vfifo->vfifo_size;
	P_MTK_BTIF_DMA_VFIFO p_mtk_vfifo = container_of(p_vfifo,
							MTK_BTIF_DMA_VFIFO,
							vfifo);
	unsigned long flag = 0;

	spin_lock_irqsave(&(g_clk_cg_spinlock), flag);
	if (0 == clock_is_on(MTK_BTIF_APDMA_CLK_CG)) {
		spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
		BTIF_ERR_FUNC("%s: clock is off before irq handle done!!!\n",
			      __FILE__);
		return i_ret;
	}
/*disable DMA Rx IER*/
	hal_btif_dma_ier_ctrl(p_dma_info, false);

/*clear Rx DMA's interrupt status*/
	BTIF_SET_BIT(RX_DMA_INT_FLAG(base), RX_DMA_INT_DONE | RX_DMA_INT_THRE);

	valid_len = BTIF_READ32(RX_DMA_VFF_VALID_SIZE(base));
	rpt = BTIF_READ32(RX_DMA_VFF_RPT(base));
	wpt = BTIF_READ32(RX_DMA_VFF_WPT(base));
	if ((0 == valid_len) && (rpt == wpt)) {
		BTIF_DBG_FUNC
		    ("rx interrupt, no data available in Rx DMA, wpt(0x%08x), rpt(0x%08x)\n",
		     rpt, wpt);
	}

	i_ret = 0;

	while ((0 < valid_len) || (rpt != wpt)) {
		rpt_wrap = rpt & DMA_RPT_WRAP;
		wpt_wrap = wpt & DMA_WPT_WRAP;
		rpt &= DMA_RPT_MASK;
		wpt &= DMA_WPT_MASK;

/*calculate length of available data in vFIFO*/
		if (wpt_wrap != p_mtk_vfifo->last_wpt_wrap) {
			real_len = wpt + vff_size - rpt;
		} else {
			real_len = wpt - rpt;
		}

		if (NULL != rx_cb) {
			tail_len = vff_size - rpt;
			p_vff_buf = vff_base + rpt;
			if (tail_len >= real_len) {
				(*rx_cb) (p_dma_info, p_vff_buf, real_len);
			} else {
				(*rx_cb) (p_dma_info, p_vff_buf, tail_len);
				p_vff_buf = vff_base;
				(*rx_cb) (p_dma_info, p_vff_buf, real_len -
					  tail_len);
			}
			i_ret += real_len;
		} else {
			BTIF_ERR_FUNC
			    ("no rx_cb found, please check your init process\n");
		}
		dsb();
		rpt += real_len;
		if (rpt >= vff_size) {
/*read wrap bit should be revert*/
			rpt_wrap ^= DMA_RPT_WRAP;
			rpt %= vff_size;
		}
		rpt |= rpt_wrap;
/*record wpt, last_wpt_wrap, rpt, last_rpt_wrap*/
		p_mtk_vfifo->wpt = wpt;
		p_mtk_vfifo->last_wpt_wrap = wpt_wrap;

		p_mtk_vfifo->rpt = rpt;
		p_mtk_vfifo->last_rpt_wrap = rpt_wrap;

/*update rpt information to DMA controller*/
		btif_reg_sync_writel(rpt, RX_DMA_VFF_RPT(base));

/*get vff valid size again and check if rx data is processed completely*/
		valid_len = BTIF_READ32(RX_DMA_VFF_VALID_SIZE(base));

		rpt = BTIF_READ32(RX_DMA_VFF_RPT(base));
		wpt = BTIF_READ32(RX_DMA_VFF_WPT(base));
	}

/*enable DMA Rx IER*/
	hal_btif_dma_ier_ctrl(p_dma_info, true);
	spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
	return i_ret;
}
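
/*
 * Worked example of the Rx length computation above (illustrative numbers
 * only): with vff_size = 4096, rpt = 4000 (wrap bit 0) and wpt = 100 (wrap
 * bit 1), the wrap bits differ, so real_len = 100 + 4096 - 4000 = 196 bytes;
 * 96 of them sit at the tail of the vFIFO and 100 at the head, which is
 * exactly the two-callback split handled by the tail_len branch above.
 */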
static int hal_tx_dma_dump_reg(P_MTK_DMA_INFO_STR p_dma_info,
			       ENUM_BTIF_REG_ID flag)
{
	int i_ret = -1;
	unsigned int base = p_dma_info->base;
	unsigned int int_flag = 0;
	unsigned int enable = 0;
	unsigned int stop = 0;
	unsigned int flush = 0;
	unsigned int wpt = 0;
	unsigned int rpt = 0;
	unsigned int int_buf = 0;
	unsigned int valid_size = 0;
	/*unsigned long irq_flag = 0;*/

	/*spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);*/
	if (0 == clock_is_on(MTK_BTIF_APDMA_CLK_CG)) {
		/*spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);*/
		BTIF_ERR_FUNC("%s: clock is off, this should never happen!!!\n",
			      __FILE__);
		return i_ret;
	}

	int_flag = BTIF_READ32(TX_DMA_INT_FLAG(base));
	enable = BTIF_READ32(TX_DMA_EN(base));
	stop = BTIF_READ32(TX_DMA_STOP(base));
	flush = BTIF_READ32(TX_DMA_FLUSH(base));
	wpt = BTIF_READ32(TX_DMA_VFF_WPT(base));
	rpt = BTIF_READ32(TX_DMA_VFF_RPT(base));
	int_buf = BTIF_READ32(TX_DMA_INT_BUF_SIZE(base));
	valid_size = BTIF_READ32(TX_DMA_VFF_VALID_SIZE(base));
	/*spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);*/

	BTIF_INFO_FUNC("DMA's clock is on\n");
	BTIF_INFO_FUNC("Tx DMA's base address: 0x%x\n", base);

	if (REG_TX_DMA_ALL == flag) {
		BTIF_INFO_FUNC("TX_EN(:0x%x\n", enable);
		BTIF_INFO_FUNC("INT_FLAG:0x%x\n", int_flag);
		BTIF_INFO_FUNC("TX_STOP:0x%x\n", stop);
		BTIF_INFO_FUNC("TX_FLUSH:0x%x\n", flush);
		BTIF_INFO_FUNC("TX_WPT:0x%x\n", wpt);
		BTIF_INFO_FUNC("TX_RPT:0x%x\n", rpt);
		BTIF_INFO_FUNC("INT_BUF_SIZE:0x%x\n", int_buf);
		BTIF_INFO_FUNC("VALID_SIZE:0x%x\n", valid_size);
		BTIF_INFO_FUNC("INT_EN:0x%x\n",
			       BTIF_READ32(TX_DMA_INT_EN(base)));
		BTIF_INFO_FUNC("TX_RST:0x%x\n", BTIF_READ32(TX_DMA_RST(base)));
		BTIF_INFO_FUNC("VFF_ADDR:0x%x\n",
			       BTIF_READ32(TX_DMA_VFF_ADDR(base)));
		BTIF_INFO_FUNC("VFF_LEN:0x%x\n",
			       BTIF_READ32(TX_DMA_VFF_LEN(base)));
		BTIF_INFO_FUNC("TX_THRE:0x%x\n",
			       BTIF_READ32(TX_DMA_VFF_THRE(base)));
		BTIF_INFO_FUNC("W_INT_BUF_SIZE:0x%x\n",
			       BTIF_READ32(TX_DMA_W_INT_BUF_SIZE(base)));
		BTIF_INFO_FUNC("LEFT_SIZE:0x%x\n",
			       BTIF_READ32(TX_DMA_VFF_LEFT_SIZE(base)));
		BTIF_INFO_FUNC("DBG_STATUS:0x%x\n",
			       BTIF_READ32(TX_DMA_DEBUG_STATUS(base)));
		i_ret = 0;
	} else {
		BTIF_WARN_FUNC("unknown flag:%d\n", flag);
	}
	BTIF_INFO_FUNC("tx dma %s\n", (enable & DMA_EN_BIT) &&
		       (!(stop && DMA_STOP_BIT)) ? "enabled" : "stoped");
	BTIF_INFO_FUNC("data in tx dma is %s sent by HW\n",
		       ((wpt == rpt) &&
			(int_buf == 0)) ? "completely" : "not completely");

	return i_ret;
}
/*****************************************************************************
* FUNCTION
*  hal_tx_dma_irq_handler
* DESCRIPTION
*  lower level tx interrupt handler
* PARAMETERS
* p_dma_info   [IN]        pointer to BTIF dma channel's information
* RETURNS
*  0 means success, negative means fail
*****************************************************************************/
int hal_tx_dma_irq_handler(P_MTK_DMA_INFO_STR p_dma_info)
{
#define MAX_CONTINIOUS_TIMES 512
	int i_ret = -1;
	unsigned int valid_size = 0;
	unsigned int vff_len = 0;
	unsigned int left_len = 0;
	unsigned int base = p_dma_info->base;
	static int flush_irq_counter;
	static struct timeval start_timer;
	static struct timeval end_timer;
	unsigned long flag = 0;
	spin_lock_irqsave(&(g_clk_cg_spinlock), flag);
	if (0 == clock_is_on(MTK_BTIF_APDMA_CLK_CG)) {
		spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
		BTIF_ERR_FUNC
		    ("%s: clock is off before irq status clear done!!!\n",
		     __FILE__);
		return i_ret;
	}
/*check if Tx VFF Left Size equal to VFIFO size or not*/
	vff_len = BTIF_READ32(TX_DMA_VFF_LEN(base));
	valid_size = BTIF_READ32(TX_DMA_VFF_VALID_SIZE(base));
	left_len = BTIF_READ32(TX_DMA_VFF_LEFT_SIZE(base));
	if (0 == flush_irq_counter) {
		do_gettimeofday(&start_timer);
	}
	if ((0 < valid_size) && (8 > valid_size)) {
		i_ret = _tx_dma_flush(p_dma_info);
		flush_irq_counter++;
		if (MAX_CONTINIOUS_TIMES <= flush_irq_counter) {
			do_gettimeofday(&end_timer);
/*when the btif tx fifo cannot accept any data and the number of bytes left in the tx vfifo stays < 8 for a while,
we assume that btif cannot send data for a long time.
to avoid generating interrupts continuously, which may affect system performance,
we clear the tx interrupt flag and disable the btif tx dma interrupt
*/
/*clear interrupt flag*/
			BTIF_CLR_BIT(TX_DMA_INT_FLAG(base),
				     TX_DMA_INT_FLAG_MASK);
/*vFIFO data has been read by DMA controller, just disable tx dma's irq*/
			i_ret = hal_btif_dma_ier_ctrl(p_dma_info, false);
			BTIF_ERR_FUNC
			    ("**********************ERROR, ERROR, ERROR**************************\n");
			BTIF_ERR_FUNC
			    ("BTIF Tx IRQ happened %d times (continuously), between %d.%d and %d.%d\n",
			     MAX_CONTINIOUS_TIMES, start_timer.tv_sec,
			     start_timer.tv_usec, end_timer.tv_sec,
			     end_timer.tv_usec);
		}
	} else if (vff_len == left_len) {
		flush_irq_counter = 0;
/*clear interrupt flag*/
		BTIF_CLR_BIT(TX_DMA_INT_FLAG(base), TX_DMA_INT_FLAG_MASK);
/*vFIFO data has been read by DMA controller, just disable tx dma's irq*/
		i_ret = hal_btif_dma_ier_ctrl(p_dma_info, false);
	} else {
#if 0
		BTIF_ERR_FUNC
		    ("**********************WARNING**************************\n");
		BTIF_ERR_FUNC("invalid irq condition, dump register\n");
		hal_dma_dump_reg(p_dma_info, REG_TX_DMA_ALL);
#endif
		BTIF_DBG_FUNC
		    ("spurious IRQ occurred, vff_len(%d), valid_size(%d), left_len(%d)\n",
		     vff_len, valid_size, left_len);
	}
	spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
	return i_ret;
}
static void hal_dma_set_default_setting(ENUM_DMA_DIR dma_dir)
{
	struct device_node *node = NULL;
	unsigned int irq_info[3] = {0, 0, 0};
	unsigned int phy_base;

	if (DMA_DIR_RX == dma_dir) {
		node = of_find_compatible_node(NULL, NULL, "mediatek,AP_DMA_BTIF_RX");
		if (node) {
			mtk_btif_rx_dma.p_irq->irq_id = irq_of_parse_and_map(node, 0);
			/* fixme: make compatible with 64-bit arch */
			mtk_btif_rx_dma.base = (unsigned long)of_iomap(node, 0);
			BTIF_INFO_FUNC("get rx_dma irq(%d),register base(0x%lx)\n",
				mtk_btif_rx_dma.p_irq->irq_id, mtk_btif_rx_dma.base);
		} else {
			BTIF_ERR_FUNC("get rx_dma device node fail\n");
		}

		/* get the interrupt line behaviour */
	    if (of_property_read_u32_array(node, "interrupts",
				irq_info, ARRAY_SIZE(irq_info))) {
			BTIF_ERR_FUNC("get interrupt flag from DTS fail\n");
		} else {
			mtk_btif_rx_dma.p_irq->irq_flags = irq_info[2];
			BTIF_INFO_FUNC("get interrupt flag(0x%x)\n",
				mtk_btif_rx_dma.p_irq->irq_flags);
		}

		if (of_property_read_u32_index(node, "reg", 0, &phy_base)) {
			BTIF_ERR_FUNC("get register phy base from DTS fail,dma_dir(%d)\n",
				dma_dir);
		} else {
			BTIF_INFO_FUNC("get register phy base dma_dir(%d)(0x%x)\n",
				dma_dir, (unsigned int)phy_base);
		}
	} else if (DMA_DIR_TX == dma_dir) {
		node = of_find_compatible_node(NULL, NULL, "mediatek,AP_DMA_BTIF_TX");
		if (node) {
			mtk_btif_tx_dma.p_irq->irq_id = irq_of_parse_and_map(node, 0);
			/* fixme: make compatible with 64-bit arch */
			mtk_btif_tx_dma.base = (unsigned long)of_iomap(node, 0);
			BTIF_INFO_FUNC("get tx_dma irq(%d),register base(0x%lx)\n",
				mtk_btif_tx_dma.p_irq->irq_id, mtk_btif_tx_dma.base);
		} else {
			BTIF_ERR_FUNC("get tx_dma device node fail\n");
		}

		/* get the interrupt line behaviour */
	    if (of_property_read_u32_array(node, "interrupts",
				irq_info, ARRAY_SIZE(irq_info))) {
			BTIF_ERR_FUNC("get interrupt flag from DTS fail\n");
		} else {
			mtk_btif_tx_dma.p_irq->irq_flags = irq_info[2];
			BTIF_INFO_FUNC("get interrupt flag(0x%x)\n",
				mtk_btif_tx_dma.p_irq->irq_flags);
		}

		if (of_property_read_u32_index(node, "reg", 0, &phy_base)) {
			BTIF_ERR_FUNC("get register phy base from DTS fail,dma_dir(%d)\n",
				dma_dir);
		} else {
			BTIF_INFO_FUNC("get register phy base dma_dir(%d)(0x%x)\n",
				dma_dir, (unsigned int)phy_base);
		}
	}

}
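
/*
 * The Rx and Tx branches of hal_dma_set_default_setting differ only in the
 * compatible string and the target DMA info structure. A hedged sketch of a
 * shared helper (the helper name is an assumption, not part of the original
 * driver); it could be called as
 *   _hal_dma_parse_dts_sketch("mediatek,AP_DMA_BTIF_RX", &mtk_btif_rx_dma);
 *   _hal_dma_parse_dts_sketch("mediatek,AP_DMA_BTIF_TX", &mtk_btif_tx_dma);
 */
static void _hal_dma_parse_dts_sketch(const char *compatible,
				      P_MTK_DMA_INFO_STR p_dma_info)
{
	struct device_node *node = NULL;
	unsigned int irq_info[3] = {0, 0, 0};
	unsigned int phy_base = 0;

	node = of_find_compatible_node(NULL, NULL, compatible);
	if (!node) {
		BTIF_ERR_FUNC("get %s device node fail\n", compatible);
		return;
	}
	p_dma_info->p_irq->irq_id = irq_of_parse_and_map(node, 0);
	/* fixme: make compatible with 64-bit arch */
	p_dma_info->base = (unsigned long)of_iomap(node, 0);

	/* get the interrupt line behaviour */
	if (of_property_read_u32_array(node, "interrupts",
				       irq_info, ARRAY_SIZE(irq_info)))
		BTIF_ERR_FUNC("get interrupt flag from DTS fail\n");
	else
		p_dma_info->p_irq->irq_flags = irq_info[2];

	if (of_property_read_u32_index(node, "reg", 0, &phy_base))
		BTIF_ERR_FUNC("get register phy base from DTS fail\n");
	else
		BTIF_INFO_FUNC("get register phy base(0x%x)\n", phy_base);
}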