int hal_btif_dma_hw_init(P_MTK_DMA_INFO_STR p_dma_info)
{
	int i_ret = 0;
	unsigned long base = p_dma_info->base;
	P_DMA_VFIFO p_vfifo = p_dma_info->p_vfifo;
	P_MTK_BTIF_DMA_VFIFO p_mtk_dma_vfifo = container_of(p_vfifo,
							    MTK_BTIF_DMA_VFIFO,
							    vfifo);

	if (DMA_DIR_RX == p_dma_info->dir) {
		/*Rx DMA*/
		/*do hardware reset*/
		/* BTIF_SET_BIT(RX_DMA_RST(base), DMA_HARD_RST); */
		/* BTIF_CLR_BIT(RX_DMA_RST(base), DMA_HARD_RST); */
		BTIF_SET_BIT(RX_DMA_RST(base), DMA_WARM_RST);
		while (0x01 & BTIF_READ32(RX_DMA_EN(base)))
			;
		/*write vfifo base address to VFF_ADDR*/
		btif_reg_sync_writel(p_vfifo->phy_addr, RX_DMA_VFF_ADDR(base));
		/*write vfifo length to VFF_LEN*/
		btif_reg_sync_writel(p_vfifo->vfifo_size, RX_DMA_VFF_LEN(base));
		/*write wpt to VFF_WPT*/
		btif_reg_sync_writel(p_mtk_dma_vfifo->wpt, RX_DMA_VFF_WPT(base));
		btif_reg_sync_writel(p_mtk_dma_vfifo->rpt, RX_DMA_VFF_RPT(base));
		/*write vff_thre to VFF_THRESHOLD*/
		btif_reg_sync_writel(p_vfifo->thre, RX_DMA_VFF_THRE(base));
		/*clear Rx DMA's interrupt status*/
		BTIF_SET_BIT(RX_DMA_INT_FLAG(base),
			     RX_DMA_INT_DONE | RX_DMA_INT_THRE);
		/*enable Rx IER by default*/
		btif_rx_dma_ier_ctrl(p_dma_info, true);
	} else {
		/*Tx DMA*/
		/*do hardware reset*/
		/* BTIF_SET_BIT(TX_DMA_RST(base), DMA_HARD_RST); */
		/* BTIF_CLR_BIT(TX_DMA_RST(base), DMA_HARD_RST); */
		BTIF_SET_BIT(TX_DMA_RST(base), DMA_WARM_RST);
		while (0x01 & BTIF_READ32(TX_DMA_EN(base)))
			;
		/*write vfifo base address to VFF_ADDR*/
		btif_reg_sync_writel(p_vfifo->phy_addr, TX_DMA_VFF_ADDR(base));
		/*write vfifo length to VFF_LEN*/
		btif_reg_sync_writel(p_vfifo->vfifo_size, TX_DMA_VFF_LEN(base));
		/*write wpt to VFF_WPT*/
		btif_reg_sync_writel(p_mtk_dma_vfifo->wpt, TX_DMA_VFF_WPT(base));
		btif_reg_sync_writel(p_mtk_dma_vfifo->rpt, TX_DMA_VFF_RPT(base));
		/*write vff_thre to VFF_THRESHOLD*/
		btif_reg_sync_writel(p_vfifo->thre, TX_DMA_VFF_THRE(base));
		BTIF_CLR_BIT(TX_DMA_INT_FLAG(base), TX_DMA_INT_FLAG_MASK);
		hal_btif_dma_ier_ctrl(p_dma_info, false);
	}
	return i_ret;
}
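/*
 * Illustrative sketch only, not part of the driver logic above: the warm
 * reset in hal_btif_dma_hw_init() polls the DMA EN register with an
 * unbounded busy loop.  A bounded variant of that poll might look like the
 * helper below; MAX_DMA_RST_POLL is a hypothetical limit, not an existing
 * driver constant.
 */
#define MAX_DMA_RST_POLL 1000
static int __maybe_unused btif_rx_dma_wait_rst_sketch(unsigned long base)
{
	unsigned int cnt = 0;

	/* same condition as the warm-reset wait loop above */
	while (0x01 & BTIF_READ32(RX_DMA_EN(base))) {
		if (++cnt > MAX_DMA_RST_POLL)
			return -1;	/* reset did not complete in time */
	}
	return 0;
}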
/*****************************************************************************
 * FUNCTION
 *  hal_btif_hw_init
 * DESCRIPTION
 *  BTIF hardware init
 * PARAMETERS
 *  p_btif   [IN]   pointer to BTIF module's information structure
 * RETURNS
 *  0 means success, negative means fail
 *****************************************************************************/
int hal_btif_hw_init(P_MTK_BTIF_INFO_STR p_btif)
{
	/*Chaozhong: to be implemented*/
	int i_ret = -1;
	unsigned int base = p_btif->base;

#if NEW_TX_HANDLING_SUPPORT
	_btif_tx_fifo_reset(p_btif);
#endif

	/*set to normal mode*/
	btif_reg_sync_writel(BTIF_FAKELCR_NORMAL_MODE, BTIF_FAKELCR(base));
	/*set to new handshake mode*/
	btif_new_handshake_ctrl(p_btif, true);
	/*No need to access: enable sleep mode*/
	/*No need to access: set Rx timeout count*/
	/*set Tx threshold*/
	/*set Rx threshold*/
	/*disable internal loopback test*/
	btif_reg_sync_writel(BTIF_TRI_LVL_TX(p_btif->tx_tri_lvl) |
			     BTIF_TRI_LVL_RX(p_btif->rx_tri_lvl) |
			     BTIF_TRI_LOOP_DIS,
			     BTIF_TRI_LVL(base));
	hal_btif_loopback_ctrl(p_btif, false);
	/*disable BTIF Tx DMA mode*/
	hal_btif_tx_mode_ctrl(p_btif, false);
	/*disable BTIF Rx DMA mode*/
	hal_btif_rx_mode_ctrl(p_btif, false);
	/*enable auto reset*/
	BTIF_SET_BIT(BTIF_DMA_EN(base), BTIF_DMA_EN_AUTORST_EN);
	/*disable Tx IER*/
	hal_btif_tx_ier_ctrl(p_btif, false);
	/*enable Rx IER by default*/
	hal_btif_rx_ier_ctrl(p_btif, true);

	i_ret = 0;
	return i_ret;
}
static int btif_tx_thr_set(P_MTK_BTIF_INFO_STR p_btif, unsigned int thr_count)
{
	int i_ret = -1;
	unsigned int base = p_btif->base;
	unsigned int value = 0;

	/*read BTIF_TRI_LVL*/
	value = BTIF_READ32(BTIF_TRI_LVL(base));
	/*clear Tx threshold bits*/
	value &= ~BTIF_TRI_LVL_TX_MASK;
	/*set Tx threshold bits to the requested trigger level*/
	value |= BTIF_TRI_LVL_TX(thr_count);
	/*write back to BTIF_TRI_LVL*/
	btif_reg_sync_writel(value, BTIF_TRI_LVL(base));
	i_ret = 0;
	return i_ret;
}
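/*
 * Illustrative sketch only: the read-modify-write sequence used by
 * btif_tx_thr_set() expressed as a generic helper.  btif_reg_update_bits_sketch
 * is a hypothetical name and is not defined or called anywhere in this driver;
 * it merely restates the pattern above (read the register, clear the field,
 * set the new value, write back synchronously).
 */
static void __maybe_unused btif_reg_update_bits_sketch(unsigned long addr,
							unsigned int mask,
							unsigned int val)
{
	unsigned int value;

	value = BTIF_READ32(addr);		/* read current register value */
	value &= ~mask;				/* clear the field to update */
	value |= (val & mask);			/* set the new field value */
	btif_reg_sync_writel(value, addr);	/* write back synchronously */
}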
/*****************************************************************************
 * FUNCTION
 *  hal_rx_dma_irq_handler
 * DESCRIPTION
 *  lower level Rx interrupt handler
 * PARAMETERS
 *  p_dma_info   [IN]       pointer to BTIF dma channel's information
 *  p_buf        [IN/OUT]   pointer to rx data buffer
 *  max_len      [IN]       max length of rx buffer
 * RETURNS
 *  0 means success, negative means fail
 *****************************************************************************/
int hal_rx_dma_irq_handler(P_MTK_DMA_INFO_STR p_dma_info,
			   unsigned char *p_buf, const unsigned int max_len)
{
	int i_ret = -1;
	unsigned int valid_len = 0;
	unsigned int wpt_wrap = 0;
	unsigned int rpt_wrap = 0;
	unsigned int wpt = 0;
	unsigned int rpt = 0;
	unsigned int tail_len = 0;
	unsigned int real_len = 0;
	unsigned int base = p_dma_info->base;
	P_DMA_VFIFO p_vfifo = p_dma_info->p_vfifo;
	dma_rx_buf_write rx_cb = p_dma_info->rx_cb;
	unsigned char *p_vff_buf = NULL;
	unsigned char *vff_base = p_vfifo->p_vir_addr;
	unsigned int vff_size = p_vfifo->vfifo_size;
	P_MTK_BTIF_DMA_VFIFO p_mtk_vfifo = container_of(p_vfifo,
							MTK_BTIF_DMA_VFIFO,
							vfifo);
	unsigned long flag = 0;

	spin_lock_irqsave(&(g_clk_cg_spinlock), flag);
	if (0 == clock_is_on(MTK_BTIF_APDMA_CLK_CG)) {
		spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
		BTIF_ERR_FUNC("%s: clock is off before irq handling is done!!!\n",
			      __FILE__);
		return i_ret;
	}
	/*disable DMA Rx IER*/
	hal_btif_dma_ier_ctrl(p_dma_info, false);
	/*clear Rx DMA's interrupt status*/
	BTIF_SET_BIT(RX_DMA_INT_FLAG(base), RX_DMA_INT_DONE | RX_DMA_INT_THRE);

	valid_len = BTIF_READ32(RX_DMA_VFF_VALID_SIZE(base));
	rpt = BTIF_READ32(RX_DMA_VFF_RPT(base));
	wpt = BTIF_READ32(RX_DMA_VFF_WPT(base));
	if ((0 == valid_len) && (rpt == wpt)) {
		BTIF_DBG_FUNC("rx interrupt, no data available in Rx DMA, wpt(0x%08x), rpt(0x%08x)\n",
			      rpt, wpt);
	}

	i_ret = 0;
	while ((0 < valid_len) || (rpt != wpt)) {
		rpt_wrap = rpt & DMA_RPT_WRAP;
		wpt_wrap = wpt & DMA_WPT_WRAP;
		rpt &= DMA_RPT_MASK;
		wpt &= DMA_WPT_MASK;

		/*calculate length of available data in vFIFO*/
		if (wpt_wrap != p_mtk_vfifo->last_wpt_wrap)
			real_len = wpt + vff_size - rpt;
		else
			real_len = wpt - rpt;

		if (NULL != rx_cb) {
			tail_len = vff_size - rpt;
			p_vff_buf = vff_base + rpt;
			if (tail_len >= real_len) {
				(*rx_cb) (p_dma_info, p_vff_buf, real_len);
			} else {
				(*rx_cb) (p_dma_info, p_vff_buf, tail_len);
				p_vff_buf = vff_base;
				(*rx_cb) (p_dma_info, p_vff_buf,
					  real_len - tail_len);
			}
			i_ret += real_len;
		} else {
			BTIF_ERR_FUNC("no rx_cb found, please check your init process\n");
		}
		dsb();
		rpt += real_len;
		if (rpt >= vff_size) {
			/*read pointer's wrap bit should be toggled*/
			rpt_wrap ^= DMA_RPT_WRAP;
			rpt %= vff_size;
		}
		rpt |= rpt_wrap;
		/*record wpt, last_wpt_wrap, rpt, last_rpt_wrap*/
		p_mtk_vfifo->wpt = wpt;
		p_mtk_vfifo->last_wpt_wrap = wpt_wrap;
		p_mtk_vfifo->rpt = rpt;
		p_mtk_vfifo->last_rpt_wrap = rpt_wrap;

		/*update rpt information to DMA controller*/
		btif_reg_sync_writel(rpt, RX_DMA_VFF_RPT(base));

		/*get vff valid size again and check if rx data is processed completely*/
		valid_len = BTIF_READ32(RX_DMA_VFF_VALID_SIZE(base));
		rpt = BTIF_READ32(RX_DMA_VFF_RPT(base));
		wpt = BTIF_READ32(RX_DMA_VFF_WPT(base));
	}
	/*enable DMA Rx IER*/
	hal_btif_dma_ier_ctrl(p_dma_info, true);
	spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
	return i_ret;
}
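/*
 * Illustrative sketch only: how the pending-data length computed inside
 * hal_rx_dma_irq_handler() relates to the raw WPT/RPT register values.
 * Each register carries a ring offset (DMA_WPT_MASK/DMA_RPT_MASK) plus a
 * wrap flag (DMA_WPT_WRAP/DMA_RPT_WRAP) that toggles whenever the
 * corresponding pointer passes the end of the vFIFO; when the two wrap
 * flags differ, the write pointer has wrapped past the read pointer, so the
 * vFIFO size must be added back.  This assumes the usual APDMA vFIFO
 * convention and is not called by the handler itself.
 */
static unsigned int __maybe_unused btif_vfifo_used_sketch(unsigned int wpt_reg,
							   unsigned int rpt_reg,
							   unsigned int vff_size)
{
	unsigned int wpt = wpt_reg & DMA_WPT_MASK;
	unsigned int rpt = rpt_reg & DMA_RPT_MASK;
	unsigned int wpt_wrapped = !!(wpt_reg & DMA_WPT_WRAP);
	unsigned int rpt_wrapped = !!(rpt_reg & DMA_RPT_WRAP);

	if (wpt_wrapped != rpt_wrapped)
		return wpt + vff_size - rpt;	/* data wraps around the end */
	return wpt - rpt;			/* data is contiguous */
}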
/*****************************************************************************
 * FUNCTION
 *  hal_dma_send_data
 * DESCRIPTION
 *  send data through BTIF in DMA mode
 * PARAMETERS
 *  p_dma_info   [IN]   pointer to BTIF dma channel's information
 *  p_buf        [IN]   pointer to tx data buffer
 *  buf_len      [IN]   tx buffer length
 * RETURNS
 *  positive means number of bytes queued, 0 means a Tx flush is in process,
 *  negative means fail
 *****************************************************************************/
int hal_dma_send_data(P_MTK_DMA_INFO_STR p_dma_info,
		      const unsigned char *p_buf, const unsigned int buf_len)
{
	int i_ret = -1;
	unsigned int base = p_dma_info->base;
	P_DMA_VFIFO p_vfifo = p_dma_info->p_vfifo;
	unsigned int len_to_send = buf_len;
	unsigned int ava_len = 0;
	unsigned int wpt = 0;
	unsigned int last_wpt_wrap = 0;
	unsigned int vff_size = 0;
	unsigned char *p_data = (unsigned char *)p_buf;
	P_MTK_BTIF_DMA_VFIFO p_mtk_vfifo = container_of(p_vfifo,
							MTK_BTIF_DMA_VFIFO,
							vfifo);

	BTIF_TRC_FUNC();
	if ((NULL == p_buf) || (0 == buf_len)) {
		i_ret = ERR_INVALID_PAR;
		BTIF_ERR_FUNC("invalid parameters, p_buf:%p, buf_len:%d\n",
			      p_buf, buf_len);
		return i_ret;
	}
	/*check if tx dma is in a flush operation; if yes, should wait until DMA finishes the flush*/
	/*currently the upper layer logic will make sure this pre-condition*/
	/*disable Tx IER, in case a Tx irq happens and the flush bit gets set in the irq handler*/
	btif_tx_dma_ier_ctrl(p_dma_info, false);

	vff_size = p_mtk_vfifo->vfifo.vfifo_size;
	ava_len = BTIF_READ32(TX_DMA_VFF_LEFT_SIZE(base));
	wpt = BTIF_READ32(TX_DMA_VFF_WPT(base)) & DMA_WPT_MASK;
	last_wpt_wrap = BTIF_READ32(TX_DMA_VFF_WPT(base)) & DMA_WPT_WRAP;

	/*copy data to vFIFO; Note: ava_len should always be no less than buf_len,
	 *otherwise the common logic layer will not call hal_dma_send_data
	 */
	if (buf_len > ava_len) {
		BTIF_ERR_FUNC("length to send:(%d) > length available(%d), abnormal!!!---!!!\n",
			      buf_len, ava_len);
		BUG_ON(buf_len > ava_len);	/* this will cause kernel panic */
	}

	len_to_send = buf_len < ava_len ? buf_len : ava_len;
	if (len_to_send + wpt >= vff_size) {
		unsigned int tail_len = vff_size - wpt;

		memcpy((p_mtk_vfifo->vfifo.p_vir_addr + wpt), p_data, tail_len);
		p_data += tail_len;
		memcpy(p_mtk_vfifo->vfifo.p_vir_addr, p_data,
		       len_to_send - tail_len);
		/*make sure all data is written to the memory area where the tx vfifo is located*/
		dsb();
		/*calculate WPT*/
		wpt = wpt + len_to_send - vff_size;
		last_wpt_wrap ^= DMA_WPT_WRAP;
	} else {
		memcpy((p_mtk_vfifo->vfifo.p_vir_addr + wpt), p_data,
		       len_to_send);
		/*make sure all data is written to the memory area where the tx vfifo is located*/
		dsb();
		/*calculate WPT*/
		wpt += len_to_send;
	}
	p_mtk_vfifo->wpt = wpt;
	p_mtk_vfifo->last_wpt_wrap = last_wpt_wrap;

	/*make sure tx dma is allowed (tx flush bit is not set) before updating WPT*/
	if (hal_dma_is_tx_allow(p_dma_info)) {
		/*make sure tx dma is enabled*/
		hal_btif_dma_ctrl(p_dma_info, DMA_CTRL_ENABLE);
		/*update WPT in the Tx DMA controller's control register*/
		btif_reg_sync_writel(wpt | last_wpt_wrap,
				     TX_DMA_VFF_WPT(base));
		if ((8 > BTIF_READ32(TX_DMA_VFF_VALID_SIZE(base))) &&
		    (0 < BTIF_READ32(TX_DMA_VFF_VALID_SIZE(base)))) {
			/*0 < valid size in Tx vFIFO < 8 and Tx flush is not
			 *in process <should always be true here>: set flush
			 *bit to DMA
			 */
			_tx_dma_flush(p_dma_info);
		}
		i_ret = len_to_send;
	} else {
		BTIF_ERR_FUNC("Tx DMA flush operation is in process, this case should never happen, please check if tx operation is allowed before calling this API\n");
		/*if a flush operation is in process, return 0*/
		i_ret = 0;
	}
	/*enable Tx IER*/
	btif_tx_dma_ier_ctrl(p_dma_info, true);
	BTIF_TRC_FUNC();
	return i_ret;
}
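/*
 * Illustrative usage sketch only (the caller and its error handling are
 * hypothetical, not part of this driver): how a caller might interpret the
 * return value of hal_dma_send_data().  It returns the number of bytes
 * queued into the Tx vFIFO on success, 0 when a Tx flush is still in
 * progress (nothing was queued and the write should be retried later), and
 * a negative error code for invalid parameters.
 */
static int __maybe_unused btif_tx_send_sketch(P_MTK_DMA_INFO_STR p_dma_info,
					      const unsigned char *p_buf,
					      unsigned int buf_len)
{
	int sent;

	sent = hal_dma_send_data(p_dma_info, p_buf, buf_len);
	if (sent < 0)
		return sent;	/* invalid parameters */
	if (sent == 0)
		return 0;	/* flush in progress; caller should retry later */
	return sent;		/* number of bytes copied into the Tx vFIFO */
}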