/*
 * Synchronously read a run of data from SDIO card memory.
 *
 * @adapter: mwifiex adapter owning the SDIO card
 * @buffer:  destination buffer for the read data
 * @len:     number of bytes requested
 * @port:    I/O port; flag bits select byte vs. block transfer mode
 * @claim:   non-zero -> claim/release the SDIO host around the access
 *
 * Returns the sdio_readsb() status (0 on success).
 */
static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer,
				  u32 len, u32 port, u8 claim)
{
	struct sdio_mmc_card *card = adapter->card;
	u32 ioport = port & MWIFIEX_SDIO_IO_PORT_MASK;
	u32 unit, count;
	int status;

	if (port & MWIFIEX_SDIO_BYTE_MODE_MASK) {
		/* byte mode: 'len' single-byte units */
		unit = 1;
		count = len;
	} else {
		/* block mode: whole blocks of MWIFIEX_SDIO_BLOCK_SIZE */
		unit = MWIFIEX_SDIO_BLOCK_SIZE;
		count = len / MWIFIEX_SDIO_BLOCK_SIZE;
	}

	if (claim)
		sdio_claim_host(card->func);

	status = sdio_readsb(card->func, buffer, ioport, count * unit);

	if (claim)
		sdio_release_host(card->func);

	return status;
}
/*
 * Read a 32-bit value from the currently selected SSB core over SDIO.
 *
 * @dev:    SSB device whose core the access targets
 * @offset: register offset within the core
 *
 * Returns the value read, or 0xffffffff when the core switch or the
 * SDIO read fails (all-ones is used as the failure sentinel).
 */
static u32 ssb_sdio_read32(struct ssb_device *dev, u16 offset)
{
	struct ssb_bus *bus = dev->bus;
	u32 val = 0xffffffff;	/* failure sentinel */
	int error = 0;

	sdio_claim_host(bus->host_sdio);
	/* Point the SDIO access window at this device's core first. */
	if (unlikely(ssb_sdio_switch_core(bus, dev)))
		goto out;
	/* Fold the core base address into the offset, clip it to the
	 * window, and set the flag requesting a 4-byte access. */
	offset |= bus->sdio_sbaddr & 0xffff;
	offset &= SBSDIO_SB_OFT_ADDR_MASK;
	offset |= SBSDIO_SB_ACCESS_2_4B_FLAG;
	val = sdio_readl(bus->host_sdio, offset, &error);
	if (error) {
		dev_dbg(ssb_sdio_dev(bus), "%04X:%04X > %08x, error %d\n",
			bus->sdio_sbaddr >> 16, offset, val, error);
	}
out:
	sdio_release_host(bus->host_sdio);
	return val;
}
/*
 * Release the SDIO interrupt previously allocated for this device
 * object, if any, and clear the allocation flag.
 */
void sdio_free_irq(struct dvobj_priv *dvobj)
{
	struct sdio_func *func;
	int ret;

	if (!dvobj->irq_alloc)
		return;

	func = dvobj->intf_data.func;
	if (func) {
		sdio_claim_host(func);
		ret = sdio_release_irq(func);
		if (ret)
			DBG_871X("%s: sdio_release_irq FAIL(%d)!\n",
				 __func__, ret);
		sdio_release_host(func);
	}
	dvobj->irq_alloc = 0;
}
/*
 * Write 'cnt' bytes from 'pdata' to the card at 'addr' via the CMD52
 * helper, claiming the SDIO host only when the caller does not already
 * hold it.
 *
 * Return:
 *	0		Success
 *	others		Fail
 */
s32 sd_cmd52_write(PSDIO_DATA psdio, u32 addr, u32 cnt, u8 *pdata)
{
	/* FIX: dropped the unused local 'i' and the dead 'err = 0'
	 * store; err is always overwritten by _sd_cmd52_write(). */
	int err;
	struct sdio_func *func;
	bool claim_needed;

_func_enter_;

	func = psdio->func;
	/* Avoid recursive host claiming when the caller already holds it. */
	claim_needed = rtw_sdio_claim_host_needed(func);

	if (claim_needed)
		sdio_claim_host(func);
	err = _sd_cmd52_write(psdio, addr, cnt, pdata);
	if (claim_needed)
		sdio_release_host(func);

_func_exit_;

	return err;
}
/**
 * @brief This function reads multiple bytes from card memory
 *
 * @param handle A Pointer to the moal_handle structure
 * @param pmbuf Pointer to mlan_buffer structure
 * @param port Port (flag bits also select byte vs. block mode)
 * @param timeout Time out value (unused in this implementation)
 *
 * @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
 */
mlan_status
woal_read_data_sync(moal_handle * handle, mlan_buffer * pmbuf, t_u32 port,
		    t_u32 timeout)
{
	mlan_status ret = MLAN_STATUS_FAILURE;
	t_u8 *buffer = (t_u8 *) (pmbuf->pbuf + pmbuf->data_offset);
	/* byte vs. block transfer mode is encoded in the port value */
	t_u8 blkmode =
		(port & MLAN_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE;
	t_u32 blksz = (blkmode == BLOCK_MODE) ? MLAN_SDIO_BLOCK_SIZE : 1;
	t_u32 blkcnt =
		(blkmode == BLOCK_MODE) ? (pmbuf->data_len / MLAN_SDIO_BLOCK_SIZE)
		: pmbuf->data_len;
	t_u32 ioport = (port & MLAN_SDIO_IO_PORT_MASK);
	int status = 0;

	/* Requests spanning several buffers go through the multi-buffer
	 * helper instead. */
	if (pmbuf->use_count > 1)
		return woal_sdio_rw_mb(handle, pmbuf, port, MFALSE);

#ifdef SDIO_MMC_DEBUG
	handle->cmd53r = 1;	/* debug marker: CMD53 read started */
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
	sdio_claim_host(((struct sdio_mmc_card *)handle->card)->func);
#endif
	status = sdio_readsb(((struct sdio_mmc_card *)handle->card)->func,
			     buffer, ioport, blkcnt * blksz);
	if (!status) {
		ret = MLAN_STATUS_SUCCESS;
	} else {
		PRINTM(MERROR, "cmd53 read error=%d\n", status);
		woal_dump_sdio_reg(handle);
	}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
	sdio_release_host(((struct sdio_mmc_card *)handle->card)->func);
#endif
#ifdef SDIO_MMC_DEBUG
	handle->cmd53r = 2;	/* debug marker: CMD53 read finished */
#endif
	return ret;
}
/*
 * Disable the IWM SDIO interface: mask device interrupts, release the
 * SDIO IRQ handler, disable the SDIO function, then free RX state and
 * reset the driver core.  Always returns 0.
 */
static int if_sdio_disable(struct iwm_priv *iwm)
{
	struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
	int ret;

	sdio_claim_host(hw->func);
	/* Writing 0 to the interrupt-enable register masks device IRQs. */
	sdio_writeb(hw->func, 0, IWM_SDIO_INTR_ENABLE_ADDR, &ret);
	if (ret < 0)
		IWM_WARN(iwm, "Couldn't disable INTR: %d\n", ret);

	sdio_release_irq(hw->func);
	sdio_disable_func(hw->func);
	sdio_release_host(hw->func);

	iwm_sdio_rx_free(hw);
	iwm_reset(iwm);

	IWM_DBG_SDIO(iwm, INFO, "IWM SDIO disable\n");

	return 0;
}
/*
 * Power off the wl12xx SDIO function and the card behind it.
 *
 * Disables the function (host claimed for the access), asks the MMC
 * core to power-save the card, and on success tells runtime PM that
 * the card is off.
 *
 * Returns 0 on success or the mmc_power_save_host() error code.
 */
static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
{
	int ret;
	struct sdio_func *func = dev_to_sdio_func(glue->dev);
	struct mmc_card *card = func->card;

	sdio_claim_host(func);
	sdio_disable_func(func);
	sdio_release_host(func);

	/* Power off the card manually in case it wasn't powered off above.
	 * BUG FIX: the original discarded mmc_power_save_host()'s return
	 * value (ret was hard-set to 0), so the error check below could
	 * never fire and runtime PM was told the card was off even when
	 * power-save had failed. */
	ret = mmc_power_save_host(card->host);
	if (ret < 0)
		goto out;

	/* Let runtime PM know the card is powered off */
	pm_runtime_put_sync(&card->dev);

out:
	return ret;
}
/*
 * Setup SDIO RX
 *
 * Hooks up the IRQ handler and then enables IRQs.
 *
 * Returns 0 on success, negative error from sdio_claim_irq() or from
 * the interrupt-enable write otherwise.
 */
int i2400ms_rx_setup(struct i2400ms *i2400ms)
{
	int result;
	struct sdio_func *func = i2400ms->func;
	struct device *dev = &func->dev;
	struct i2400m *i2400m = &i2400ms->i2400m;

	d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);

	init_waitqueue_head(&i2400ms->bm_wfa_wq);
	/* rx_lock guards the bootmode handshake state below. */
	spin_lock(&i2400m->rx_lock);
	i2400ms->bm_wait_result = -EINPROGRESS;
	/*
	 * Before we are about to enable the RX interrupt, make sure
	 * bm_ack_size is cleared to -EINPROGRESS which indicates
	 * no RX interrupt happened yet or the previous interrupt
	 * has been handled, we are ready to take the new interrupt
	 */
	i2400ms->bm_ack_size = -EINPROGRESS;
	spin_unlock(&i2400m->rx_lock);

	sdio_claim_host(func);
	result = sdio_claim_irq(func, i2400ms_irq);
	if (result < 0) {
		dev_err(dev, "Cannot claim IRQ: %d\n", result);
		goto error_irq_claim;
	}
	/* sdio_writeb() reports its status through 'result'. */
	result = 0;
	sdio_writeb(func, 1, I2400MS_INTR_ENABLE_ADDR, &result);
	if (result < 0) {
		/* enable failed: undo the IRQ claim so no handler is
		 * left installed */
		sdio_release_irq(func);
		dev_err(dev, "Failed to enable interrupts %d\n", result);
	}
error_irq_claim:
	sdio_release_host(func);
	d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
	return result;
}
/*
 * Power off the wl1271 SDIO function and the card behind it.
 *
 * NOTE(review): there is no sdio_claim_host() before the
 * sdio_disable_func()/sdio_release_host() pair below.  This looks
 * intentional — presumably the matching power-on path claims the host
 * and keeps it held while the chip is powered — but confirm against
 * the power-on counterpart before changing it.
 */
static int wl1271_sdio_power_off(struct wl1271 *wl)
{
	struct sdio_func *func = wl_to_func(wl);
	int ret;

	sdio_disable_func(func);
	sdio_release_host(func);

	/* Power off the card manually, even if runtime PM is enabled. */
	ret = mmc_power_save_host(func->card->host);
	if (ret < 0) {
		printk (KERN_ERR "%s:mmc_power_save_host: %d\n", __func__, ret );
		return ret;
	}

	/* If enabled, let runtime PM know the card is powered off */
	if (pm_runtime_enabled(&func->dev))
		ret = pm_runtime_put_sync(&func->dev);

	printk (KERN_ERR "%s: %d\n", __func__, ret );

	return ret;
}
/*
 * SDIO removal callback: unwind the WLAN driver first, then disable
 * the SDIO function with the host claimed.
 */
static void mtk_sdio_remove(struct sdio_func *func)
{
#if CFG_DBG_GPIO_PINS
	/* tear down the debug GPIO pins */
	debug_gpio_deinit();
#endif

	ASSERT(func);

	/* detach the WLAN driver before touching the bus */
	pfWlanRemove();

	sdio_claim_host(func);
	sdio_disable_func(func);
	sdio_release_host(func);
}
/*
 * Read one byte from SDIO function 0 at 'addr'.
 * Any failure reported through *err is logged.
 */
u8 sd_f0_read8(PSDIO_DATA psdio, u32 addr, s32 *err)
{
	struct sdio_func *func;
	bool do_claim;
	u8 val;

_func_enter_;

	func = psdio->func;
	/* claim the host only when the helper says it is needed */
	do_claim = rtw_sdio_claim_host_needed(func);

	if (do_claim)
		sdio_claim_host(func);
	val = sdio_f0_readb(func, addr, err);
	if (do_claim)
		sdio_release_host(func);

	if (err && *err)
		DBG_871X(KERN_ERR "%s: FAIL!(%d) addr=0x%05x\n",
			 __func__, *err, addr);

_func_exit_;

	return val;
}
/*
 * Force the SDIO bus clock to 'clk' MHz by writing the mmc host's ios
 * directly and invoking host->ops->set_ios() by hand.
 *
 * NOTE(review): this bypasses the mmc core's clock management; it is
 * done with the host claimed, and the clock is clamped to f_max.
 */
void sif_set_clock(struct sdio_func *func, int clk)
{
	struct mmc_host *host = NULL;
	struct mmc_card *card = NULL;

	card = func->card;
	host = card->host;

	sdio_claim_host(func);

	/* currently only set clock */
	host->ios.clock = clk * 1000000;	/* MHz -> Hz */

	esp_dbg(ESP_SHOW, "%s clock is %u\n", __func__, host->ios.clock);
	if (host->ios.clock > host->f_max) {
		host->ios.clock = host->f_max;	/* clamp to host maximum */
	}
	host->ops->set_ios(host, &host->ios);
	/* brief settle delay after the clock change */
	mdelay(2);

	sdio_release_host(func);
}
/**
 * @brief This function reads multiple bytes from card memory
 *
 * @param handle A Pointer to the moal_handle structure
 * @param pmbuf Pointer to mlan_buffer structure
 * @param port Port (flag bits also select byte vs. block mode)
 * @param timeout Time out value (unused in this implementation)
 *
 * @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
 */
mlan_status
woal_read_data_sync(moal_handle * handle, mlan_buffer * pmbuf, t_u32 port,
		    t_u32 timeout)
{
	mlan_status ret = MLAN_STATUS_FAILURE;
	t_u8 *buffer = (t_u8 *) (pmbuf->pbuf + pmbuf->data_offset);
	/* byte vs. block transfer mode is encoded in the port value */
	t_u8 blkmode =
		(port & MLAN_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE;
	t_u32 blksz = (blkmode == BLOCK_MODE) ? MLAN_SDIO_BLOCK_SIZE : 1;
	t_u32 blkcnt =
		(blkmode == BLOCK_MODE) ? (pmbuf->data_len / MLAN_SDIO_BLOCK_SIZE)
		: pmbuf->data_len;
	t_u32 ioport = (port & MLAN_SDIO_IO_PORT_MASK);

	sdio_claim_host(((struct sdio_mmc_card *) handle->card)->func);
	/* sdio_readsb() returns 0 on success */
	if (!sdio_readsb
	    (((struct sdio_mmc_card *) handle->card)->func, buffer, ioport,
	     blkcnt * blksz))
		ret = MLAN_STATUS_SUCCESS;
	sdio_release_host(((struct sdio_mmc_card *) handle->card)->func);

	return ret;
}
static void r8712s_dev_remove(struct sdio_func *func) { _adapter *padapter = (_adapter*) (((struct dvobj_priv*)sdio_get_drvdata(func))->padapter); struct net_device *pnetdev = (struct net_device *)padapter->pnetdev; _func_exit_; if (padapter) { RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+dev_remove()\n")); // padapter->bSurpriseRemoved = _TRUE; if (pnetdev) unregister_netdev(pnetdev); //will call netdev_close() cancel_all_timer(padapter); r871x_dev_unload(padapter); rtw_free_drv_sw(padapter); sdio_claim_host(func); RT_TRACE(_module_hci_intfs_c_,_drv_err_,(" in dev_remove():sdio_claim_host !\n")); sdio_release_irq(func); RT_TRACE(_module_hci_intfs_c_,_drv_err_,(" in dev_remove():sdio_release_irq !\n")); sdio_disable_func(func); RT_TRACE(_module_hci_intfs_c_,_drv_err_,(" in dev_remove():sdio_disable_func !\n")); sdio_release_host(func); RT_TRACE(_module_hci_intfs_c_,_drv_err_,(" in dev_remove():sdio_release_host !\n")); } RT_TRACE(_module_hci_intfs_c_,_drv_err_,("-dev_remove()\n")); _func_exit_; return; }
/*
 * SDIO interrupt dispatch for the MTK HIF-SDIO glue.
 *
 * The function releases the host up front and re-claims it before
 * returning — it appears to be entered with the host already claimed
 * by the SDIO IRQ thread, and the hif_sdio register helpers claim the
 * host themselves.
 */
static INT32 mtk_sdio_interrupt(MTK_WCN_HIF_SDIO_CLTCTX cltCtx)
{
	P_GLUE_INFO_T prGlueInfo = NULL;
	INT32 ret = 0;
	struct sdio_func *func;

	func = hif_sdio_ctx_to_func_wapper(cltCtx);
	sdio_release_host(func);

	prGlueInfo = mtk_wcn_hif_sdio_get_drvdata(cltCtx);
	ASSERT(prGlueInfo);
	if (!prGlueInfo) {
		/* BUG FIX: re-claim the host before every return so the
		 * caller's claim/release stays balanced; the original
		 * leaked the release on this path and on the HALT path. */
		sdio_claim_host(func);
		return (-HIF_SDIO_ERR_FAIL);
	}

	if (prGlueInfo->u4Flag & GLUE_FLAG_HALT) {
		/* halting: just mask further interrupts and bail out */
		ret = mtk_wcn_hif_sdio_writel(cltCtx, MCR_WHLPCR, WHLPCR_INT_EN_CLR);
		sdio_claim_host(func);	/* BUG FIX: rebalance the claim */
		return ret;
	}

	/* mask further interrupts until the tx service thread handles
	 * this one */
	ret = mtk_wcn_hif_sdio_writel(cltCtx, MCR_WHLPCR, WHLPCR_INT_EN_CLR);
	set_bit(GLUE_FLAG_INT_BIT, &prGlueInfo->u4Flag);

	/* when we got sdio interrupt, we wake up the tx servie thread */
	wake_up_interruptible(&prGlueInfo->waitq);

	sdio_claim_host(func);
	return ret;
}
/* get MAC address from device */ void hw_get_mac_address(void *data) { struct net_adapter *adapter = (struct net_adapter *)data; struct hw_private_packet req; int nResult = 0; int retry = 3; req.id0 = 'W'; req.id1 = 'P'; req.code = HwCodeMacRequest; req.value = 0; do { if (adapter == NULL) break; sdio_claim_host(adapter->func); nResult = sd_send(adapter, (u_char *)&req, sizeof(struct hw_private_packet)); sdio_release_host(adapter->func); if (nResult != STATUS_SUCCESS) dump_debug("hw_get_mac_address: sd_send fail!!"); msleep(300); retry--; /*in case we dont get MAC we need to release power lock and probe finsh */ if (!retry) { adapter->download_complete = TRUE; wake_up_interruptible(&adapter->download_event); msleep(100); } } while ((!adapter->mac_ready) && (!adapter->halted) && retry); adapter->pdata->g_cfg->powerup_done = true ; dump_debug("MAC thread exit"); return; }
/*
 * Raw SDIO read for wlcore.
 *
 * @child: wlcore child device; its parent carries the sdio glue
 * @addr:  chip address to read from
 * @buf:   destination buffer, at least 'len' bytes
 * @len:   number of bytes to read
 * @fixed: true -> fixed-address FIFO read (sdio_readsb),
 *         false -> incrementing-address read (sdio_memcpy_fromio)
 *
 * HW_ACCESS_ELP_CTRL_REG is special-cased as a single function-0 byte
 * read (CMD52 via sdio_f0_readb).
 *
 * NOTE(review): the debug hex dump below runs before the read, so it
 * shows the buffer's previous contents, not the data just read —
 * confirm whether that is intended.
 */
static int __must_check wl12xx_sdio_raw_read(struct device *child, int addr,
					     void *buf, size_t len, bool fixed)
{
	int ret;
	struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
	struct sdio_func *func = dev_to_sdio_func(glue->dev);

	sdio_claim_host(func);

	if (unlikely(dump)) {
		printk(KERN_DEBUG "wlcore_sdio: READ from 0x%04x\n", addr);
		print_hex_dump(KERN_DEBUG, "wlcore_sdio: READ ",
			       DUMP_PREFIX_OFFSET, 16, 1, buf, len, false);
	}

	if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) {
		((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
		dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
			addr, ((u8 *)buf)[0]);
	} else {
		if (fixed)
			ret = sdio_readsb(func, buf, addr, len);
		else
			ret = sdio_memcpy_fromio(func, buf, addr, len);
		dev_dbg(child->parent, "sdio read 53 addr 0x%x, %zu bytes\n",
			addr, len);
	}

	sdio_release_host(func);

	if (WARN_ON(ret))
		dev_err(child->parent, "sdio read failed (%d)\n", ret);

	return ret;
}
/*
 * System-suspend hook: when wake-on-wireless is enabled, ask the
 * MMC/SDIO core to keep the card powered across suspend.
 *
 * NOTE(review): sdio_release_host() is called here without a matching
 * claim inside this function — presumably the driver keeps the host
 * claimed while the chip is powered and drops it for suspend.  Confirm
 * against the power-on/resume paths before touching this.
 */
static int wl1271_suspend(struct device *dev)
{
	/* Tell MMC/SDIO core it's OK to power down the card
	 * (if it isn't already), but not to remove it completely */
	struct sdio_func *func = dev_to_sdio_func(dev);
	struct wl1271 *wl = sdio_get_drvdata(func);
	mmc_pm_flag_t sdio_flags;
	int ret = 0;

	wl1271_debug(DEBUG_MAC80211, "wl1271 suspend. wow_enabled: %d",
		     wl->wow_enabled);

	/* check whether sdio should keep power */
	if (wl->wow_enabled) {
		sdio_flags = sdio_get_host_pm_caps(func);

		/* WoW requires the host to keep the card powered */
		if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
			wl1271_error("can't keep power while host "
				     "is suspended");
			ret = -EINVAL;
			goto out;
		}
		printk("\n\nSetting MMC_PM_KEEP_POWER\n");
		/* keep power while host suspended */
		ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
		if (ret) {
			wl1271_error("error while trying to keep power");
			goto out;
		}

		/* release host */
		sdio_release_host(func);
	}
out:
	return ret;
}
/**
 * @brief This function checks the interrupt status and handle it accordingly.
 *
 * Consumes the interrupt bits latched by the ISR (under OS_INT_DISABLE),
 * marks TX-done readiness on DN_LD_HOST_INT_STATUS and pulls received
 * data on UP_LD_HOST_INT_STATUS.
 *
 * @param priv A pointer to bt_private structure
 * @param ireg A pointer to variable that keeps returned value
 * @return BT_STATUS_SUCCESS
 */
int sbi_get_int_status(bt_private * priv, u8 * ireg)
{
	int ret = BT_STATUS_SUCCESS;
	u8 sdio_ireg = 0;
	struct sdio_mmc_card *card = priv->bt_dev.card;
	struct hci_dev *hdev = priv->bt_dev.hcidev;

	ENTER();

	*ireg = 0;
	/* atomically fetch-and-clear the bits latched by the ISR */
	OS_INT_DISABLE;
	sdio_ireg = priv->adapter->sd_ireg;
	priv->adapter->sd_ireg = 0;
	OS_INT_RESTORE;

	sdio_claim_host(card->func);
	PRINTM(INTR, "BT: get_int_status %s: sdio_ireg=0x%x\n",
	       hdev->name, sdio_ireg);
	priv->adapter->irq_done = sdio_ireg;
	if (sdio_ireg & DN_LD_HOST_INT_STATUS) {	/* tx_done INT */
		if (priv->bt_dev.tx_dnld_rdy) {	/* tx_done already received */
			PRINTM(INFO,
			       "BT: warning: tx_done already received: tx_dnld_rdy=0x%x int status=0x%x\n",
			       priv->bt_dev.tx_dnld_rdy, sdio_ireg);
		} else {
			priv->bt_dev.tx_dnld_rdy = TRUE;
		}
	}
	if (sdio_ireg & UP_LD_HOST_INT_STATUS)
		sd_card_to_host(priv);
	*ireg = sdio_ireg;
	ret = BT_STATUS_SUCCESS;
	sdio_release_host(card->func);

	LEAVE();
	return ret;
}
/*
 * Read one byte from SDIO function 0 at 'addr' through the interface
 * handle.  Returns 0 (with no access) once the card has been reported
 * surprise-removed; otherwise returns the byte read, logging any error
 * reported through *err.
 */
u8 sd_f0_read8(struct intf_hdl *pintfhdl,u32 addr, s32 *err)
{
	PADAPTER padapter;
	struct dvobj_priv *psdiodev;
	PSDIO_DATA psdio;
	u8 v=0;
	struct sdio_func *func;
	bool claim_needed;

_func_enter_;

	padapter = pintfhdl->padapter;
	psdiodev = pintfhdl->pintf_dev;
	psdio = &psdiodev->intf_data;

	/* skip bus access entirely once the card is gone */
	if(padapter->bSurpriseRemoved){
		//DBG_871X(" %s (padapter->bSurpriseRemoved )!!!\n",__FUNCTION__);
		return v;
	}

	func = psdio->func;
	/* claim the host only when the helper says it is needed */
	claim_needed = rtw_sdio_claim_host_needed(func);

	if (claim_needed)
		sdio_claim_host(func);
	v = sdio_f0_readb(func, addr, err);
	if (claim_needed)
		sdio_release_host(func);
	if (err && *err)
		DBG_871X(KERN_ERR "%s: FAIL!(%d) addr=0x%05x\n", __func__, *err, addr);

_func_exit_;

	return v;
}
/*
 * Pull one packet of 'size' bytes from the SDIO RX FIFO and hand it to
 * the RX path.  Returns true when a buffer was received and queued,
 * false otherwise (NULL adapter, zero size, or empty FIFO).
 */
u8 RecvOnePkt(struct adapter *adapter, u32 size)
{
	struct dvobj_priv *dvobj;
	struct recv_buf *buf;
	struct sdio_func *sdio_func;
	u8 ok = false;

	DBG_871X("+%s: size: %d+\n", __func__, size);

	if (!adapter) {
		DBG_871X(KERN_ERR "%s: adapter is NULL!\n", __func__);
		return false;
	}

	dvobj = adapter_to_dvobj(adapter);
	sdio_func = dvobj->intf_data.func;

	if (size) {
		sdio_claim_host(sdio_func);
		buf = sd_recv_rxfifo(adapter, size);
		if (buf) {
			sd_rxhandler(adapter, buf);
			ok = true;
		}
		sdio_release_host(sdio_func);
	}

	DBG_871X("-%s-\n", __func__);
	return ok;
}
static u32 sdio_init(struct dvobj_priv *dvobj) { PSDIO_DATA psdio_data; struct sdio_func *func; int err; _func_enter_; psdio_data = &dvobj->intf_data; func = psdio_data->func; // 1. init SDIO bus sdio_claim_host(func); err = sdio_enable_func(func); if (err) { DBG_871X(KERN_CRIT "%s: sdio_enable_func FAIL(%d)!\n", __func__, err); goto release; } err = sdio_set_block_size(func, 512); if (err) { DBG_871X(KERN_CRIT "%s: sdio_set_block_size FAIL(%d)!\n", __func__, err); goto release; } psdio_data->block_transfer_len = 512; psdio_data->tx_block_mode = 1; psdio_data->rx_block_mode = 1; release: sdio_release_host(func); _func_exit_; if (err) return _FAIL; return _SUCCESS; }
/*----------------------------------------------------------------------------*/
/*
 * glBusSetIrq - install the SDIO interrupt handler for the WLAN glue.
 *
 * @pvData:   the net_device registered by the driver (sanity-checked,
 *            then unused)
 * @pfnIsr:   unused; the handler is hard-wired to mtk_sdio_interrupt
 * @pvCookie: the P_GLUE_INFO_T for this device
 *
 * Returns the sdio_claim_irq() status (0 when built against the
 * MTK_WCN_HIF_SDIO layer), or -1 on bad arguments.
 */
INT_32 glBusSetIrq ( PVOID pvData, PVOID pfnIsr, PVOID pvCookie )
{
	int ret = 0;
	struct net_device *prNetDevice = NULL;
	P_GLUE_INFO_T prGlueInfo = NULL;
	P_GL_HIF_INFO_T prHifInfo = NULL;

	ASSERT(pvData);
	if (!pvData) {
		return -1;
	}
	prNetDevice = (struct net_device *) pvData;	/* kept for sanity; not used below */
	prGlueInfo = (P_GLUE_INFO_T) pvCookie;
	ASSERT(prGlueInfo);
	if (!prGlueInfo) {
		return -1;
	}

	prHifInfo = &prGlueInfo->rHifInfo;
#if (MTK_WCN_HIF_SDIO == 0)
	/* direct SDIO path: claim the host and hook the IRQ */
	sdio_claim_host(prHifInfo->func);
	ret = sdio_claim_irq(prHifInfo->func, mtk_sdio_interrupt);
	sdio_release_host(prHifInfo->func);
#else
	/* HIF-SDIO wrapper path: just enable the wrapped IRQ */
	mtk_wcn_hif_sdio_enable_irq(prHifInfo->cltCtx, TRUE);
#endif

	return ret;
} /* end of glBusSetIrq() */
/*
 * Pull one packet of 'size' bytes from the SDIO RX FIFO and hand it to
 * the RX path.  Returns _TRUE when a buffer was received and queued,
 * _FALSE otherwise (NULL adapter, zero size, or empty FIFO).
 */
u8 RecvOnePkt(PADAPTER padapter, u32 size)
{
	struct recv_buf *precvbuf;
	struct dvobj_priv *psddev;
	PSDIO_DATA psdio_data;
	struct sdio_func *func;
	u8 res = _FALSE;

	DBG_871X("+%s: size: %d+\n", __func__, size);

	if (padapter == NULL) {
		DBG_871X(KERN_ERR "%s: padapter is NULL!\n", __func__);
		return _FALSE;
	}

	psddev = adapter_to_dvobj(padapter);
	psdio_data = &psddev->intf_data;
	func = psdio_data->func;

	if(size) {
		sdio_claim_host(func);
		precvbuf = sd_recv_rxfifo(padapter, size);

		if (precvbuf) {
			//printk("Completed Recv One Pkt.\n");
			sd_rxhandler(padapter, precvbuf);
			res = _TRUE;
		}else{
			res = _FALSE;
		}
		sdio_release_host(func);
	}

	DBG_871X("-%s-\n", __func__);

	return res;
}
/*
 * Setup SDIO RX
 *
 * Hooks up the IRQ handler and then enables IRQs.
 *
 * Returns 0 on success, negative error from sdio_claim_irq() or from
 * the interrupt-enable write otherwise.
 */
int i2400ms_rx_setup(struct i2400ms *i2400ms)
{
	int result;
	struct sdio_func *func = i2400ms->func;
	struct device *dev = &func->dev;

	d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
	sdio_claim_host(func);
	result = sdio_claim_irq(func, i2400ms_irq);
	if (result < 0) {
		dev_err(dev, "Cannot claim IRQ: %d\n", result);
		goto error_irq_claim;
	}
	/* sdio_writeb() reports its status through 'result'. */
	result = 0;
	sdio_writeb(func, 1, I2400MS_INTR_ENABLE_ADDR, &result);
	if (result < 0) {
		/* enable failed: undo the IRQ claim so no handler is
		 * left installed */
		sdio_release_irq(func);
		dev_err(dev, "Failed to enable interrupts %d\n", result);
	}
error_irq_claim:
	sdio_release_host(func);
	d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
	return result;
}
/*
 * Write a 32-bit value to the card at 'addr'.
 *
 * On failure the write is retried up to SD_IO_TRY_CNT times; repeated
 * failures (or -ESHUTDOWN/-ENODEV) mark the adapter surprise-removed
 * via the continual-io-error tracking.  The final status is reported
 * through *err.
 */
void sd_write32(struct intf_hdl *pintfhdl, u32 addr, u32 v, s32 *err)
{
	PADAPTER padapter;
	struct dvobj_priv *psdiodev;
	PSDIO_DATA psdio;
	struct sdio_func *func;
	bool claim_needed;

_func_enter_;

	padapter = pintfhdl->padapter;
	psdiodev = pintfhdl->pintf_dev;
	psdio = &psdiodev->intf_data;

	/* skip bus access entirely once the card is gone */
	if(padapter->bSurpriseRemoved){
		//DBG_871X(" %s (padapter->bSurpriseRemoved)!!!\n",__FUNCTION__);
		return ;
	}

	func = psdio->func;
	/* claim the host only when the helper says it is needed */
	claim_needed = rtw_sdio_claim_host_needed(func);

	if (claim_needed)
		sdio_claim_host(func);
	sdio_writel(func, v, addr, err);
	if (claim_needed)
		sdio_release_host(func);
	if (err && *err)
	{
		int i;

		DBG_871X(KERN_ERR "%s: (%d) addr=0x%05x val=0x%08x\n", __func__, *err, addr, v);

		*err = 0;
		/* retry loop: each attempt re-claims the host as needed */
		for(i=0; i<SD_IO_TRY_CNT; i++)
		{
			if (claim_needed) sdio_claim_host(func);
			sdio_writel(func, v, addr, err);
			if (claim_needed) sdio_release_host(func);
			if (*err == 0){
				/* success: reset the error streak */
				rtw_reset_continual_io_error(psdiodev);
				break;
			}else{
				DBG_871X(KERN_ERR "%s: (%d) addr=0x%05x, val=0x%x, try_cnt=%d\n", __func__, *err, addr, v, i);
				/* bus gone: card shut down or removed */
				if(( -ESHUTDOWN == *err ) || ( -ENODEV == *err)){
					padapter->bSurpriseRemoved = _TRUE;
				}
				/* too many consecutive failures -> give up */
				if(rtw_inc_and_chk_continual_io_error(psdiodev) == _TRUE ){
					padapter->bSurpriseRemoved = _TRUE;
					break;
				}
			}
		}

		if (i==SD_IO_TRY_CNT)
			DBG_871X(KERN_ERR "%s: FAIL!(%d) addr=0x%05x val=0x%08x, try_cnt=%d\n", __func__, *err, addr, v, i);
		else
			DBG_871X(KERN_ERR "%s: (%d) addr=0x%05x val=0x%08x, try_cnt=%d\n", __func__, *err, addr, v, i);
	}

_func_exit_;
}
void hw_transmit_thread(struct work_struct *work) { struct buffer_descriptor *dsc; struct hw_private_packet hdr; struct net_adapter *adapter; int nRet = 0; adapter = container_of(work, struct net_adapter, transmit_work); struct wimax_cfg *g_cfg = adapter->pdata->g_cfg; wake_lock_timeout(&g_cfg->wimax_rxtx_lock, 0.2 * HZ); mutex_lock(&adapter->rx_lock); if (!gpio_get_value(WIMAX_EN)) { dump_debug("WiMAX Power OFF!! (TX)"); adapter->halted = TRUE; return; } /* prevent WiMAX modem suspend during tx phase */ mutex_lock(&g_cfg->suspend_mutex); hw_device_wakeup(adapter); while (!queue_empty(adapter->hw.q_send.head)) { if (adapter->halted) { /* send stop message */ hdr.id0 = 'W'; hdr.id1 = 'P'; hdr.code = HwCodeHaltedIndication; hdr.value = 0; if (sd_send(adapter, (unsigned char *)&hdr, sizeof(struct hw_private_packet))) dump_debug("halted," " send HaltIndication to FW err"); break; } dsc = (struct buffer_descriptor *) queue_get_head(adapter->hw.q_send.head); if (!dsc->buffer) { dump_debug("dsc->buffer is NULL"); break; } if (!dsc) { dump_debug("Fail...node is null"); mutex_unlock(&g_cfg->suspend_mutex); break; } sdio_claim_host(adapter->func); nRet = sd_send_data(adapter, dsc); sdio_release_host(adapter->func); queue_remove_head(adapter->hw.q_send.head); kfree(dsc->buffer); kfree(dsc); if (nRet != STATUS_SUCCESS) { dump_debug("SendData Fail******"); ++adapter->XmitErr; if (nRet == -ENOMEDIUM || nRet == /*-ETIMEOUT*/-110) { adapter->halted = TRUE; break; } } } mutex_unlock(&g_cfg->suspend_mutex); mutex_unlock(&adapter->rx_lock); return ; }
/*
 * System-resume hook for the AR6K SDIO HIF.
 *
 * If the device was suspended: re-enable the SDIO function, restore
 * the mailbox block size, and (if needed) recreate the async I/O
 * thread.  Then spawn either a startup task (no claimed context) or a
 * resume task to bring the stack back up.
 *
 * Returns 0 on success, an error status otherwise.
 */
static int hifDeviceResume(struct device *dev)
{
	struct task_struct* pTask;
	const char *taskName;
	int (*taskFunc)(void *);
	struct sdio_func *func = dev_to_sdio_func(dev);
	A_STATUS ret = A_OK;
	HIF_DEVICE *device;

	device = getHifDevice(func);
	if (device->is_suspend) {
		/* enable the SDIO function */
		sdio_claim_host(func);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
		/* give us some time to enable, in ms */
		func->enable_timeout = 100;
#endif
		ret = sdio_enable_func(func);
		if (ret) {
			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("AR6000: %s(), Unable to enable AR6K: 0x%X\n",
						__FUNCTION__, ret));
			sdio_release_host(func);
			return ret;
		}
		ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
		sdio_release_host(func);
		if (ret) {
			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("AR6000: %s(), Unable to set block size 0x%x AR6K: 0x%X\n",
						__FUNCTION__, HIF_MBOX_BLOCK_SIZE, ret));
			return ret;
		}
		device->is_suspend = FALSE;
		/* create async I/O thread (only if it does not exist yet) */
		if (!device->async_task) {
			device->async_shutdown = 0;
			device->async_task = kthread_create(async_task,
							    (void *)device,
							    "AR6K Async");
			if (IS_ERR(device->async_task)) {
				AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("AR6000: %s(), to create async task\n", __FUNCTION__));
				return A_ERROR;
			}
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: start async task\n"));
			wake_up_process(device->async_task );
		}
	}

	/* pick which bring-up routine the helper thread should run */
	if (!device->claimedContext) {
		printk("WARNING!!! No claimedContext during resume wlan\n");
		taskFunc = startup_task;
		taskName = "AR6K startup";
	} else {
		taskFunc = resume_task;
		taskName = "AR6K resume";
	}
	/* create resume thread */
	pTask = kthread_create(taskFunc, (void *)device, taskName);
	if (IS_ERR(pTask)) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("AR6000: %s(), to create resume task\n", __FUNCTION__));
		return A_ERROR;
	}
	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: start resume task\n"));
	wake_up_process(pTask);

	return A_SUCCESS(ret) ? 0 : ret;
}
/*
 * SDIO probe callback for AR6K devices.
 *
 * Registers the HIF device, optionally enables scatter support and the
 * 4-bit async IRQ mode (AR6003+), enables the SDIO function, sets the
 * mailbox block size, initializes the bus-request pool, and spawns the
 * async I/O and startup threads.
 *
 * Returns 0 on success, an error status otherwise.
 */
static int hifDeviceInserted(struct sdio_func *func, const struct sdio_device_id *id)
{
	int ret;
	HIF_DEVICE * device;
	int count;
	struct task_struct* startup_task_struct;

	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: hifDeviceInserted, Function: 0x%X, Vendor ID: 0x%X, Device ID: 0x%X, block size: 0x%X/0x%X\n",
			func->num, func->vendor, func->device, func->max_blksize, func->cur_blksize));

	addHifDevice(func);
	device = getHifDevice(func);

	spin_lock_init(&device->lock);
	spin_lock_init(&device->asynclock);
	DL_LIST_INIT(&device->ScatterReqHead);

	if (!nohifscattersupport) {
		/* try to allow scatter operation on all instances,
		 * unless globally overridden */
		device->scatter_enabled = TRUE;
	}

	/* enable the SDIO function */
	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: claim\n"));
	sdio_claim_host(func);
	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: enable\n"));

	if ((id->device & MANUFACTURER_ID_AR6K_BASE_MASK) >= MANUFACTURER_ID_AR6003_BASE) {
		/* enable 4-bit ASYNC interrupt on AR6003 or later devices */
		ret = Func0_CMD52WriteByte(func->card, CCCR_SDIO_IRQ_MODE_REG, SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
		if (ret) {
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("AR6000: failed to enable 4-bit ASYNC IRQ mode %d \n",ret));
			sdio_release_host(func);
			return ret;
		}
		AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: 4-bit ASYNC IRQ mode enabled\n"));
	}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
	/* give us some time to enable, in ms */
	func->enable_timeout = 100;
#endif
	ret = sdio_enable_func(func);
	if (ret) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("AR6000: %s(), Unable to enable AR6K: 0x%X\n",
					  __FUNCTION__, ret));
		sdio_release_host(func);
		return ret;
	}
	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: set block size 0x%X\n", HIF_MBOX_BLOCK_SIZE));
	ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
	sdio_release_host(func);
	if (ret) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("AR6000: %s(), Unable to set block size 0x%x AR6K: 0x%X\n",
					  __FUNCTION__, HIF_MBOX_BLOCK_SIZE, ret));
		return ret;
	}

	/* Initialize the bus requests to be used later */
	A_MEMZERO(device->busRequest, sizeof(device->busRequest));
	for (count = 0; count < BUS_REQUEST_MAX_NUM; count ++) {
		sema_init(&device->busRequest[count].sem_req, 0);
		hifFreeBusRequest(device, &device->busRequest[count]);
	}

	/* create async I/O thread */
	device->async_shutdown = 0;
	device->async_task = kthread_create(async_task, (void *)device, "AR6K Async");
	if (IS_ERR(device->async_task)) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("AR6000: %s(), to create async task\n", __FUNCTION__));
		return A_ERROR;
	}
	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: start async task\n"));
	sema_init(&device->sem_async, 0);
	wake_up_process(device->async_task );

	/* create startup thread */
	startup_task_struct = kthread_create(startup_task, (void *)device, "AR6K startup");
	if (IS_ERR(startup_task_struct)) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("AR6000: %s(), to create startup task\n", __FUNCTION__));
		return A_ERROR;
	}
	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: start startup task\n"));
	wake_up_process(startup_task_struct);

	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: return %d\n", ret));
	return ret;
}
/* thread to serialize all requests, both sync and async */
/*
 * Worker kthread: sleeps on sem_async, then drains device->asyncreq
 * with the SDIO host claimed for the whole batch.  Scatter requests go
 * to DoHifReadWriteScatter(); plain requests run through
 * __HIFReadWrite() synchronously, after which async requests are freed
 * and completed via the rwCompletionHandler callback while sync
 * requests just get their waiter upped.
 */
static int async_task(void *param)
{
	HIF_DEVICE *device;
	BUS_REQUEST *request;
	A_STATUS status;
	unsigned long flags;

	device = (HIF_DEVICE *)param;
	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async task\n"));
	set_current_state(TASK_INTERRUPTIBLE);
	while(!device->async_shutdown) {
		/* wait for work */
		if (down_interruptible(&device->sem_async) != 0) {
			/* interrupted, exit */
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async task interrupted\n"));
			break;
		}
		if (device->async_shutdown) {
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async task stopping\n"));
			break;
		}
		/* we want to hold the host over multiple cmds if possible, but holding the host blocks card interrupts */
		sdio_claim_host(device->func);
		spin_lock_irqsave(&device->asynclock, flags);
		/* pull the request to work on */
		while (device->asyncreq != NULL) {
			request = device->asyncreq;
			if (request->inusenext != NULL) {
				device->asyncreq = request->inusenext;
			} else {
				device->asyncreq = NULL;
			}
			/* drop the list lock while the request is
			 * processed; re-taken at the bottom of the loop */
			spin_unlock_irqrestore(&device->asynclock, flags);
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async_task processing req: 0x%X\n", (unsigned int)request));
			if (request->pScatterReq != NULL) {
				A_ASSERT(device->scatter_enabled);
				/* this is a queued scatter request, pass the request to scatter routine which
				 * executes it synchronously, note, no need to free the request since scatter requests
				 * are maintained on a separate list */
				status = DoHifReadWriteScatter(device,request);
			} else {
				/* call HIFReadWrite in sync mode to do the work */
				status = __HIFReadWrite(device, request->address, request->buffer,
							request->length, request->request & ~HIF_SYNCHRONOUS, NULL);
				if (request->request & HIF_ASYNCHRONOUS) {
					/* async caller: free the request, then
					 * notify via the completion callback */
					void *context = request->context;
					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async_task freeing req: 0x%X\n", (unsigned int)request));
					hifFreeBusRequest(device, request);
					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async_task completion routine req: 0x%X\n", (unsigned int)request));
					device->htcCallbacks.rwCompletionHandler(context, status);
				} else {
					/* sync caller: publish status and wake it */
					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async_task upping req: 0x%X\n", (unsigned int)request));
					request->status = status;
					up(&request->sem_req);
				}
			}
			spin_lock_irqsave(&device->asynclock, flags);
		}
		spin_unlock_irqrestore(&device->asynclock, flags);
		sdio_release_host(device->func);
	}
	complete_and_exit(&device->async_completion, 0);
	return 0;
}