int hal_btif_dma_hw_init(P_MTK_DMA_INFO_STR p_dma_info)
{
	int i_ret = 0;
	unsigned long base = p_dma_info->base;
	P_DMA_VFIFO p_vfifo = p_dma_info->p_vfifo;
	P_MTK_BTIF_DMA_VFIFO p_mtk_dma_vfifo = container_of(p_vfifo,
							    MTK_BTIF_DMA_VFIFO,
							    vfifo);

	if (DMA_DIR_RX == p_dma_info->dir) {
/*Rx DMA*/
/*do warm reset; hard reset is intentionally left disabled:
 *	BTIF_SET_BIT(RX_DMA_RST(base), DMA_HARD_RST);
 *	BTIF_CLR_BIT(RX_DMA_RST(base), DMA_HARD_RST);
 */
		BTIF_SET_BIT(RX_DMA_RST(base), DMA_WARM_RST);
/*wait for the warm reset to take effect (Rx DMA enable bit cleared)*/
		while (0x01 & BTIF_READ32(RX_DMA_EN(base)))
			;
/*write vfifo base address to VFF_ADDR*/
		btif_reg_sync_writel(p_vfifo->phy_addr, RX_DMA_VFF_ADDR(base));
/*write vfifo length to VFF_LEN*/
		btif_reg_sync_writel(p_vfifo->vfifo_size, RX_DMA_VFF_LEN(base));
/*write wpt to VFF_WPT and rpt to VFF_RPT*/
		btif_reg_sync_writel(p_mtk_dma_vfifo->wpt,
				     RX_DMA_VFF_WPT(base));
		btif_reg_sync_writel(p_mtk_dma_vfifo->rpt,
				     RX_DMA_VFF_RPT(base));
/*write vff_thre to VFF_THRESHOLD*/
		btif_reg_sync_writel(p_vfifo->thre, RX_DMA_VFF_THRE(base));
/*clear Rx DMA's interrupt status*/
		BTIF_SET_BIT(RX_DMA_INT_FLAG(base),
			     RX_DMA_INT_DONE | RX_DMA_INT_THRE);

/*enable Rx IER by default*/
		btif_rx_dma_ier_ctrl(p_dma_info, true);
	} else {
/*Tx DMA*/
/*do warm reset; hard reset is intentionally left disabled:
 *	BTIF_SET_BIT(TX_DMA_RST(base), DMA_HARD_RST);
 *	BTIF_CLR_BIT(TX_DMA_RST(base), DMA_HARD_RST);
 */
		BTIF_SET_BIT(TX_DMA_RST(base), DMA_WARM_RST);
/*wait for the warm reset to take effect (Tx DMA enable bit cleared)*/
		while (0x01 & BTIF_READ32(TX_DMA_EN(base)))
			;
/*write vfifo base address to VFF_ADDR*/
		btif_reg_sync_writel(p_vfifo->phy_addr, TX_DMA_VFF_ADDR(base));
/*write vfifo length to VFF_LEN*/
		btif_reg_sync_writel(p_vfifo->vfifo_size, TX_DMA_VFF_LEN(base));
/*write wpt to VFF_WPT and rpt to VFF_RPT*/
		btif_reg_sync_writel(p_mtk_dma_vfifo->wpt,
				     TX_DMA_VFF_WPT(base));
		btif_reg_sync_writel(p_mtk_dma_vfifo->rpt,
				     TX_DMA_VFF_RPT(base));
/*write vff_thre to VFF_THRESHOLD*/
		btif_reg_sync_writel(p_vfifo->thre, TX_DMA_VFF_THRE(base));

/*clear Tx DMA's interrupt status*/
		BTIF_CLR_BIT(TX_DMA_INT_FLAG(base), TX_DMA_INT_FLAG_MASK);

/*disable Tx IER by default*/
		hal_btif_dma_ier_ctrl(p_dma_info, false);
	}

	return i_ret;
}
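/*
 * Illustrative only: a minimal sketch (excluded from the build) of how a
 * caller might bring up both DMA channels with hal_btif_dma_hw_init().
 * The names btif_dma_hw_init_example, p_rx_dma_info and p_tx_dma_info are
 * hypothetical and do not exist elsewhere in this driver.
 */
#if 0
static int btif_dma_hw_init_example(P_MTK_DMA_INFO_STR p_rx_dma_info,
				    P_MTK_DMA_INFO_STR p_tx_dma_info)
{
	int i_ret = 0;

/*Rx channel: warm reset, vFIFO programming, Rx IER enabled by default*/
	i_ret = hal_btif_dma_hw_init(p_rx_dma_info);
	if (i_ret)
		return i_ret;

/*Tx channel: warm reset, vFIFO programming, Tx IER left disabled*/
	i_ret = hal_btif_dma_hw_init(p_tx_dma_info);
	return i_ret;
}
#endif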
/*****************************************************************************
* FUNCTION
*  hal_rx_dma_irq_handler
* DESCRIPTION
*  lower level Rx interrupt handler
* PARAMETERS
*  p_dma_info   [IN]        pointer to BTIF DMA channel's information
*  p_buf        [IN/OUT]    pointer to Rx data buffer
*  max_len      [IN]        max length of Rx buffer
* RETURNS
*  number of bytes delivered to the Rx callback, negative means fail
*****************************************************************************/
int hal_rx_dma_irq_handler(P_MTK_DMA_INFO_STR p_dma_info,
			   unsigned char *p_buf, const unsigned int max_len)
{
	int i_ret = -1;
	unsigned int valid_len = 0;
	unsigned int wpt_wrap = 0;
	unsigned int rpt_wrap = 0;
	unsigned int wpt = 0;
	unsigned int rpt = 0;
	unsigned int tail_len = 0;
	unsigned int real_len = 0;
	unsigned long base = p_dma_info->base;
	P_DMA_VFIFO p_vfifo = p_dma_info->p_vfifo;
	dma_rx_buf_write rx_cb = p_dma_info->rx_cb;
	unsigned char *p_vff_buf = NULL;
	unsigned char *vff_base = p_vfifo->p_vir_addr;
	unsigned int vff_size = p_vfifo->vfifo_size;
	P_MTK_BTIF_DMA_VFIFO p_mtk_vfifo = container_of(p_vfifo,
							MTK_BTIF_DMA_VFIFO,
							vfifo);
	unsigned long flag = 0;

	spin_lock_irqsave(&(g_clk_cg_spinlock), flag);
	if (0 == clock_is_on(MTK_BTIF_APDMA_CLK_CG)) {
		spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
		BTIF_ERR_FUNC("%s: clock is off before irq handle done!!!\n",
			      __FILE__);
		return i_ret;
	}
/*disable DMA Rx IER*/
	hal_btif_dma_ier_ctrl(p_dma_info, false);

/*clear Rx DMA's interrupt status*/
	BTIF_SET_BIT(RX_DMA_INT_FLAG(base), RX_DMA_INT_DONE | RX_DMA_INT_THRE);

	valid_len = BTIF_READ32(RX_DMA_VFF_VALID_SIZE(base));
	rpt = BTIF_READ32(RX_DMA_VFF_RPT(base));
	wpt = BTIF_READ32(RX_DMA_VFF_WPT(base));
	if ((0 == valid_len) && (rpt == wpt)) {
		BTIF_DBG_FUNC
		    ("rx interrupt, no data available in Rx DMA, wpt(0x%08x), rpt(0x%08x)\n",
		     wpt, rpt);
	}

	i_ret = 0;

	while ((0 < valid_len) || (rpt != wpt)) {
		rpt_wrap = rpt & DMA_RPT_WRAP;
		wpt_wrap = wpt & DMA_WPT_WRAP;
		rpt &= DMA_RPT_MASK;
		wpt &= DMA_WPT_MASK;

/*calculate length of available data in vFIFO*/
		if (wpt_wrap != p_mtk_vfifo->last_wpt_wrap) {
			real_len = wpt + vff_size - rpt;
		} else {
			real_len = wpt - rpt;
		}
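/*
 * Worked example (values are illustrative only): with vff_size = 0x800,
 * rpt = 0x700 and wpt = 0x100, differing wrap bits mean the write pointer
 * has wrapped past the buffer end, so real_len = 0x100 + 0x800 - 0x700 =
 * 0x200 bytes are pending; with equal wrap bits, real_len is simply
 * wpt - rpt.
 */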

		if (NULL != rx_cb) {
			tail_len = vff_size - rpt;
			p_vff_buf = vff_base + rpt;
			if (tail_len >= real_len) {
				(*rx_cb) (p_dma_info, p_vff_buf, real_len);
			} else {
				(*rx_cb) (p_dma_info, p_vff_buf, tail_len);
				p_vff_buf = vff_base;
				(*rx_cb) (p_dma_info, p_vff_buf, real_len -
					  tail_len);
			}
			i_ret += real_len;
		} else {
			BTIF_ERR_FUNC
			    ("no rx_cb found, please check your init process\n");
		}
		dsb();
		rpt += real_len;
		if (rpt >= vff_size) {
/*read pointer's wrap bit should be inverted*/
			rpt_wrap ^= DMA_RPT_WRAP;
			rpt %= vff_size;
		}
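/*
 * Once rpt has wrapped back into [0, vff_size), its wrap flag is toggled
 * above and OR-ed back in below, so the value written to RX_DMA_VFF_RPT
 * carries the same wrap-bit encoding that was read from the register.
 */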
		rpt |= rpt_wrap;
/*record wpt, last_wpt_wrap, rpt, last_rpt_wrap*/
		p_mtk_vfifo->wpt = wpt;
		p_mtk_vfifo->last_wpt_wrap = wpt_wrap;

		p_mtk_vfifo->rpt = rpt;
		p_mtk_vfifo->last_rpt_wrap = rpt_wrap;

/*update rpt information to DMA controller*/
		btif_reg_sync_writel(rpt, RX_DMA_VFF_RPT(base));

/*get vff valid size again and check if rx data is processed completely*/
		valid_len = BTIF_READ32(RX_DMA_VFF_VALID_SIZE(base));

		rpt = BTIF_READ32(RX_DMA_VFF_RPT(base));
		wpt = BTIF_READ32(RX_DMA_VFF_WPT(base));
	}

/*enable DMA Rx IER*/
	hal_btif_dma_ier_ctrl(p_dma_info, true);
	spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
	return i_ret;
}
/*****************************************************************************
* FUNCTION
*  hal_tx_dma_irq_handler
* DESCRIPTION
*  lower level Tx interrupt handler
* PARAMETERS
*  p_dma_info   [IN]        pointer to BTIF DMA channel's information
* RETURNS
*  0 means success, negative means fail
*****************************************************************************/
int hal_tx_dma_irq_handler(P_MTK_DMA_INFO_STR p_dma_info)
{
#define MAX_CONTINIOUS_TIMES 512
	int i_ret = -1;
	unsigned int valid_size = 0;
	unsigned int vff_len = 0;
	unsigned int left_len = 0;
	unsigned long base = p_dma_info->base;
	static int flush_irq_counter;
	static struct timeval start_timer;
	static struct timeval end_timer;
	unsigned long flag = 0;
	spin_lock_irqsave(&(g_clk_cg_spinlock), flag);
	if (0 == clock_is_on(MTK_BTIF_APDMA_CLK_CG)) {
		spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
		BTIF_ERR_FUNC
		    ("%s: clock is off before irq status clear is done!!!\n",
		     __FILE__);
		return i_ret;
	}
/*check whether Tx VFF left size equals the vFIFO size*/
	vff_len = BTIF_READ32(TX_DMA_VFF_LEN(base));
	valid_size = BTIF_READ32(TX_DMA_VFF_VALID_SIZE(base));
	left_len = BTIF_READ32(TX_DMA_VFF_LEFT_SIZE(base));
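/*
 * Assumption based on how these registers are used below: VFF_VALID_SIZE
 * is the amount of queued Tx data the DMA has not yet drained and
 * VFF_LEFT_SIZE is the remaining free space, so valid_size + left_len
 * should roughly equal vff_len; the "vff_len == left_len" branch then
 * corresponds to an empty Tx vFIFO.
 */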
	if (0 == flush_irq_counter) {
		do_gettimeofday(&start_timer);
	}
	if ((0 < valid_size) && (8 > valid_size)) {
		i_ret = _tx_dma_flush(p_dma_info);
		flush_irq_counter++;
		if (MAX_CONTINIOUS_TIMES <= flush_irq_counter) {
			do_gettimeofday(&end_timer);
/*
 * When the BTIF Tx FIFO cannot accept any data and fewer than 8 bytes
 * remain pending in the Tx vFIFO for a long stretch, we assume that BTIF
 * cannot send data for a long time. To avoid generating interrupts
 * continuously, which may affect system performance, we clear the Tx
 * interrupt flag and disable the BTIF Tx interrupt.
 */
/*clear interrupt flag*/
			BTIF_CLR_BIT(TX_DMA_INT_FLAG(base),
				     TX_DMA_INT_FLAG_MASK);
/*vFIFO data has been read by DMA controller, just disable tx dma's irq*/
			i_ret = hal_btif_dma_ier_ctrl(p_dma_info, false);
			BTIF_ERR_FUNC
			    ("**********************ERROR, ERROR, ERROR**************************\n");
			BTIF_ERR_FUNC
			    ("BTIF Tx IRQ happened %d times (continuously), between %ld.%06ld and %ld.%06ld\n",
			     MAX_CONTINIOUS_TIMES, start_timer.tv_sec,
			     start_timer.tv_usec, end_timer.tv_sec,
			     end_timer.tv_usec);
		}
	} else if (vff_len == left_len) {
		flush_irq_counter = 0;
/*clear interrupt flag*/
		BTIF_CLR_BIT(TX_DMA_INT_FLAG(base), TX_DMA_INT_FLAG_MASK);
/*vFIFO data has been read by DMA controller, just disable tx dma's irq*/
		i_ret = hal_btif_dma_ier_ctrl(p_dma_info, false);
	} else {
#if 0
		BTIF_ERR_FUNC
		    ("**********************WARNING**************************\n");
		BTIF_ERR_FUNC("invalid irq condition, dump register\n");
		hal_dma_dump_reg(p_dma_info, REG_TX_DMA_ALL);
#endif
		BTIF_DBG_FUNC
		    ("spurious IRQ occurred, vff_len(%d), valid_size(%d), left_len(%d)\n",
		     vff_len, valid_size, left_len);
	}
	spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
	return i_ret;
}
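/*
 * Illustrative only: a minimal sketch (excluded from the build) of how an
 * upper-layer ISR might dispatch to the two handlers above. The name
 * btif_dma_irq_dispatch is hypothetical, and the sketch assumes
 * <linux/interrupt.h> is already included by this file; in the Rx path the
 * handler above delivers data through p_dma_info->rx_cb, so no local
 * buffer is passed here.
 */
#if 0
static irqreturn_t btif_dma_irq_dispatch(int irq, void *data)
{
	P_MTK_DMA_INFO_STR p_dma_info = (P_MTK_DMA_INFO_STR)data;
	int i_ret;

	if (DMA_DIR_RX == p_dma_info->dir)
		i_ret = hal_rx_dma_irq_handler(p_dma_info, NULL, 0);
	else
		i_ret = hal_tx_dma_irq_handler(p_dma_info);

	return (i_ret < 0) ? IRQ_NONE : IRQ_HANDLED;
}
#endif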