static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
				 size_t len, bool fixed)
{
	int ret = 0;
	struct sdio_func *func = wl_to_func(wl);

	if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
		((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
		wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
			     addr, ((u8 *)buf)[0]);
	} else {
		if (fixed)
			ret = sdio_readsb(func, buf, addr, len);
		else
			ret = sdio_memcpy_fromio(func, buf, addr, len);

		wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
			     addr, len);
		wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
	}

	if (ret)
		wl1271_error("sdio read failed (%d)", ret);
}
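wl1271_sdio_raw_read() issues its sdio_f0_readb()/sdio_readsb()/sdio_memcpy_fromio() calls without claiming the SDIO host, so the caller is expected to hold it. A minimal caller sketch, assuming a hypothetical wl1271_sdio_read_locked() wrapper that is not part of the driver:

/* Hypothetical caller sketch: bracket the raw read with the host claim
 * that the Linux SDIO core requires around sdio_* I/O calls. */
static void wl1271_sdio_read_locked(struct wl1271 *wl, int addr, void *buf,
				    size_t len, bool fixed)
{
	struct sdio_func *func = wl_to_func(wl);

	sdio_claim_host(func);
	wl1271_sdio_raw_read(wl, addr, buf, len, fixed);
	sdio_release_host(func);
}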
static A_STATUS
__HIFReadWrite(HIF_DEVICE *device, A_UINT32 address, A_UCHAR *buffer,
	       A_UINT32 length, A_UINT32 request, void *context)
{
	A_UINT8 opcode;
	A_STATUS status = A_OK;
	int ret;
	A_UINT8 *tbuffer;
	A_BOOL bounced = FALSE;

	AR_DEBUG_ASSERT(device != NULL);
	AR_DEBUG_ASSERT(device->func != NULL);

	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
			("AR6000: Device: 0x%p, buffer:0x%p (addr:0x%X)\n",
			 device, buffer, address));

	do {
		if (request & HIF_EXTENDED_IO) {
			//AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: Command type: CMD53\n"));
		} else {
			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
					("AR6000: Invalid command type: 0x%08x\n", request));
			status = A_EINVAL;
			break;
		}

		if (request & HIF_BLOCK_BASIS) {
			/* round to whole block length size */
			length = (length / HIF_MBOX_BLOCK_SIZE) * HIF_MBOX_BLOCK_SIZE;
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("AR6000: Block mode (BlockLen: %d)\n", length));
		} else if (request & HIF_BYTE_BASIS) {
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("AR6000: Byte mode (BlockLen: %d)\n", length));
		} else {
			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
					("AR6000: Invalid data mode: 0x%08x\n", request));
			status = A_EINVAL;
			break;
		}

#if 0
		/* useful for checking register accesses */
		if (length & 0x3) {
			A_PRINTF(KERN_ALERT "AR6000: HIF (%s) is not a multiple of 4 bytes, addr:0x%X, len:%d\n",
				 request & HIF_WRITE ? "write" : "read",
				 address, length);
		}
#endif

		if (request & HIF_WRITE) {
			if ((address >= HIF_MBOX_START_ADDR(0)) &&
			    (address <= HIF_MBOX_END_ADDR(3))) {
				AR_DEBUG_ASSERT(length <= HIF_MBOX_WIDTH);
				/*
				 * Mailbox write. Adjust the address so that the
				 * last byte falls on the EOM address.
				 */
				address += (HIF_MBOX_WIDTH - length);
			}
		}

		if (request & HIF_FIXED_ADDRESS) {
			opcode = CMD53_FIXED_ADDRESS;
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("AR6000: Address mode: Fixed 0x%X\n", address));
		} else if (request & HIF_INCREMENTAL_ADDRESS) {
			opcode = CMD53_INCR_ADDRESS;
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("AR6000: Address mode: Incremental 0x%X\n", address));
		} else {
			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
					("AR6000: Invalid address mode: 0x%08x\n", request));
			status = A_EINVAL;
			break;
		}

		if (request & HIF_WRITE) {
#if HIF_USE_DMA_BOUNCE_BUFFER
			if (BUFFER_NEEDS_BOUNCE(buffer)) {
				AR_DEBUG_ASSERT(device->dma_buffer != NULL);
				tbuffer = device->dma_buffer;
				/* copy the write data to the dma buffer */
				AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
				memcpy(tbuffer, buffer, length);
				bounced = TRUE;
			} else {
				tbuffer = buffer;
			}
#else
			tbuffer = buffer;
#endif
			if (opcode == CMD53_FIXED_ADDRESS) {
				ret = sdio_writesb(device->func, address, tbuffer, length);
				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
						("AR6000: writesb ret=%d address: 0x%X, len: %d, 0x%X\n",
						 ret, address, length, *(int *)tbuffer));
			} else {
				ret = sdio_memcpy_toio(device->func, address, tbuffer, length);
				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
						("AR6000: writeio ret=%d address: 0x%X, len: %d, 0x%X\n",
						 ret, address, length, *(int *)tbuffer));
			}
		} else if (request & HIF_READ) {
#if HIF_USE_DMA_BOUNCE_BUFFER
			if (BUFFER_NEEDS_BOUNCE(buffer)) {
				AR_DEBUG_ASSERT(device->dma_buffer != NULL);
				AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
				tbuffer = device->dma_buffer;
				bounced = TRUE;
			} else {
				tbuffer = buffer;
			}
#else
			tbuffer = buffer;
#endif
			if (opcode == CMD53_FIXED_ADDRESS) {
				ret = sdio_readsb(device->func, tbuffer, address, length);
				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
						("AR6000: readsb ret=%d address: 0x%X, len: %d, 0x%X\n",
						 ret, address, length, *(int *)tbuffer));
			} else {
				ret = sdio_memcpy_fromio(device->func, tbuffer, address, length);
				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
						("AR6000: readio ret=%d address: 0x%X, len: %d, 0x%X\n",
						 ret, address, length, *(int *)tbuffer));
			}
#if HIF_USE_DMA_BOUNCE_BUFFER
			if (bounced) {
				/* copy the read data from the dma buffer */
				memcpy(buffer, tbuffer, length);
			}
#endif
		} else {
			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
					("AR6000: Invalid direction: 0x%08x\n", request));
			status = A_EINVAL;
			break;
		}

		if (ret) {
			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
					("AR6000: SDIO bus operation failed! MMC stack returned : %d \n", ret));
			status = A_ERROR;
		}
	} while (FALSE);

	return status;
}
static void smssdio_work_thread(struct work_struct *arg)
{
	int ret, isr;
	struct smscore_buffer_t *cb;
	struct SmsMsgHdr_S *hdr;
	size_t size;
	struct smssdio_device *smsdev =
		container_of(arg, struct smssdio_device, work_thread);
	struct sdio_func *sdfunc = smsdev->func;

	/*
	 * The interrupt register has no defined meaning. It is just
	 * a way of turning off the level triggered interrupt.
	 */
	sdio_claim_host(smsdev->func);
	isr = sdio_readb(smsdev->func, SMSSDIO_INT, &ret);
	if (ret) {
		sms_err("Got error reading interrupt status=%d, isr=%d\n",
			ret, isr);
		isr = sdio_readb(smsdev->func, SMSSDIO_INT, &ret);
		if (ret) {
			sms_err("Second read also failed, try to recover\n");
			sdio_release_host(smsdev->func);
			sdfunc = kmemdup(smsdev->func, sizeof(struct sdio_func),
					 GFP_KERNEL);
			if (!sdfunc) {
				sms_err("Out of memory!!!");
				return;
			}
			sdfunc->num = 0;
			sdio_claim_host(sdfunc);
			sdio_writeb(sdfunc, 2, SMSSDIO_CCCR, &ret);
			sms_err("Read ISR status (write returned) %d\n", ret);
			isr = sdio_readb(smsdev->func, SMSSDIO_INT, &ret);
			sms_err("Read returned ret=%d, isr=%d\n", ret, isr);
			sdio_writeb(sdfunc, 0, SMSSDIO_CCCR, &ret);
			sdio_release_host(sdfunc);
			kfree(sdfunc);
			sms_err("Recovered, but this transaction is lost.");
			return;
		}
		sms_err("Second read succeeded status=%d, isr=%d (continue)\n",
			ret, isr);
	}

	if (smsdev->split_cb == NULL) {
		cb = smscore_getbuffer(smsdev->coredev);
		if (!cb) {
			sms_err("Unable to allocate data buffer!\n");
			sdio_release_host(smsdev->func);
			return;
		}

		ret = sdio_memcpy_fromio(smsdev->func, cb->p, SMSSDIO_DATA,
					 SMSSDIO_BLOCK_SIZE);
		if (ret) {
			sms_warn("Error %d reading initial block, continue with sequence.\n",
				 ret);
		}

		hdr = cb->p;
		if (hdr->msgFlags & MSG_HDR_FLAG_SPLIT_MSG) {
			smsdev->split_cb = cb;
			sdio_release_host(smsdev->func);
			return;
		}

		if (hdr->msgLength > smsdev->func->cur_blksize)
			size = hdr->msgLength - smsdev->func->cur_blksize;
		else
			size = 0;
	} else {
		cb = smsdev->split_cb;
		hdr = cb->p;

		size = hdr->msgLength - sizeof(struct SmsMsgHdr_S);

		smsdev->split_cb = NULL;
	}

	if (size) {
		void *buffer;

		buffer = cb->p + (hdr->msgLength - size);
		size = ALIGN(size, SMSSDIO_BLOCK_SIZE);

		BUG_ON(smsdev->func->cur_blksize != SMSSDIO_BLOCK_SIZE);

		/*
		 * First attempt to transfer all of it in one go...
		 */
		ret = sdio_memcpy_fromio(smsdev->func, buffer,
					 SMSSDIO_DATA, size);
		if (ret && ret != -EINVAL) {
			smscore_putbuffer(smsdev->coredev, cb);
			sms_err("Error %d reading data from card!\n", ret);
			sdio_release_host(smsdev->func);
			return;
		}

		/*
		 * ...then fall back to one block at a time if that is
		 * not possible...
		 *
		 * (we have to do this manually because of the
		 * problem with the "increase address" bit)
		 */
		if (ret == -EINVAL) {
			while (size) {
				ret = sdio_memcpy_fromio(smsdev->func, buffer,
							 SMSSDIO_DATA,
							 smsdev->func->cur_blksize);
				if (ret) {
					smscore_putbuffer(smsdev->coredev, cb);
					sms_err("Error %d reading data from card!\n",
						ret);
					sdio_release_host(smsdev->func);
					return;
				}

				buffer += smsdev->func->cur_blksize;

				if (size > smsdev->func->cur_blksize)
					size -= smsdev->func->cur_blksize;
				else
					size = 0;
			}
		}
	}

	sdio_release_host(smsdev->func);

	cb->size = hdr->msgLength;
	cb->offset = 0;
	smscore_onresponse(smsdev->coredev, cb);
}
static void iwmct_irq_read_worker(struct work_struct *ws)
{
	struct iwmct_priv *priv;
	struct iwmct_work_struct *read_req;
	__le32 *buf = NULL;
	int ret;
	int iosize;
	u32 barker;
	bool is_barker;

	priv = container_of(ws, struct iwmct_priv, isr_worker);

	LOG_TRACE(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);

	/* --------------------- Handshake with device -------------------- */
	sdio_claim_host(priv->func);

	/* all list manipulations have to be protected by
	 * sdio_claim_host/sdio_release_host */
	if (list_empty(&priv->read_req_list)) {
		LOG_ERROR(priv, IRQ, "read_req_list empty in read worker\n");
		goto exit_release;
	}

	read_req = list_entry(priv->read_req_list.next,
			      struct iwmct_work_struct, list);

	list_del(&read_req->list);
	iosize = read_req->iosize;
	kfree(read_req);

	buf = kzalloc(iosize, GFP_KERNEL);
	if (!buf) {
		LOG_ERROR(priv, IRQ, "kzalloc error, buf size %d\n", iosize);
		goto exit_release;
	}

	LOG_INFO(priv, IRQ, "iosize=%d, buf=%p, func=%d\n",
		 iosize, buf, priv->func->num);

	/* read from device */
	ret = sdio_memcpy_fromio(priv->func, buf, IWMC_SDIO_DATA_ADDR, iosize);
	if (ret) {
		LOG_ERROR(priv, IRQ, "error %d reading buffer\n", ret);
		goto exit_release;
	}

	LOG_HEXDUMP(IRQ, (u8 *)buf, iosize);

	barker = le32_to_cpu(buf[0]);

	/* Verify whether it's a barker and if not - treat as regular Rx */
	if (barker == IWMC_BARKER_ACK ||
	    (barker & BARKER_DNLOAD_BARKER_MSK) == IWMC_BARKER_REBOOT) {

		/* A valid barker repeats the same value in the first 4 dwords */
		is_barker = (buf[1] == buf[0]) &&
			    (buf[2] == buf[0]) &&
			    (buf[3] == buf[0]);

		if (!is_barker) {
			LOG_WARNING(priv, IRQ,
				    "Potentially inconsistent barker %08X_%08X_%08X_%08X\n",
				    le32_to_cpu(buf[0]), le32_to_cpu(buf[1]),
				    le32_to_cpu(buf[2]), le32_to_cpu(buf[3]));
		}
	} else {
		is_barker = false;
	}

	/* Handle Top CommHub message */
	if (!is_barker) {
		sdio_release_host(priv->func);
		handle_top_message(priv, (u8 *)buf, iosize);
		goto exit;
	} else if (barker == IWMC_BARKER_ACK) { /* Handle barkers */
		if (atomic_read(&priv->dev_sync) == 0) {
			LOG_ERROR(priv, IRQ,
				  "ACK barker arrived out-of-sync\n");
			goto exit_release;
		}

		/* Continuing to FW download (after Sync is completed) */
		atomic_set(&priv->dev_sync, 0);
		LOG_INFO(priv, IRQ,
			 "ACK barker arrived - starting FW download\n");
	} else { /* REBOOT barker */
		LOG_INFO(priv, IRQ, "Received reboot barker: %x\n", barker);
		priv->barker = barker;

		if (barker & BARKER_DNLOAD_SYNC_MSK) {
			/* Send the same barker back */
			ret = __iwmct_tx(priv, buf, iosize);
			if (ret) {
				LOG_ERROR(priv, IRQ,
					  "error %d echoing barker\n", ret);
				goto exit_release;
			}
			LOG_INFO(priv, IRQ, "Echoing barker to device\n");
			atomic_set(&priv->dev_sync, 1);
			goto exit_release;
		}

		/* Continuing to FW download (without Sync) */
		LOG_INFO(priv, IRQ,
			 "No sync requested - starting FW download\n");
	}

	sdio_release_host(priv->func);

	if (priv->dbg.fw_download)
		iwmct_fw_load(priv);
	else
		LOG_ERROR(priv, IRQ, "FW download not allowed\n");

	goto exit;

exit_release:
	sdio_release_host(priv->func);
exit:
	kfree(buf);
	LOG_TRACE(priv, IRQ, "exit iwmct_irq_read_worker\n");
}
int bcmsdio_cmd53(unsigned int offset, int rw, int func, int blk_mode,
		  int opcode, int buflen, char *buff)
{
	struct sdio_func *function = func_data[BCM_SDIO_FN1]->func;
	int ret = -1;
	int count = 0;

	if (function == NULL) {
		BCM_DEBUG_PRINT(ERROR_LEVEL, KERN_ALERT " ***Error: %s %d ***\n",
				__func__, __LINE__);
		return -ENON_INTF_ERR;
	}

	sdio_claim_host(function);

	if (func_data[BCM_SDIO_FN1]->bremoved) {
		ret = -ENON_INTF_ERR;
		BCM_DEBUG_PRINT(ERROR_LEVEL, KERN_ALERT
				"Error :%s,%d removed flag var is non-zero :%d \n",
				__func__, __LINE__,
				func_data[BCM_SDIO_FN1]->bremoved);
		goto rel_host;
	}

	/*
	 * NOTE: blk_mode is not used here. If buflen exceeds the corresponding
	 * block size, the SDIO stack internally converts the request to a
	 * block request.
	 */
	if (buflen % 4) {
		int i;

		/*
		 * Some SDIO controllers don't like CMD53 for a request length
		 * that is not a multiple of 4, so fall back to byte-wise CMD52.
		 */
		ret = 0;
		for (i = 0; i < buflen; i++) {
			buff[i] = bcmsdio_cmd52_nolock(buff[i], offset, rw,
						       func, &ret);
			if (ret) {
				BCM_DEBUG_PRINT(debuglevel,
						"FAILED IN INDEX: %d for CMD52 %d rw: %x addr: %x\n",
						i, ret, rw, offset);
				goto rel_host;
			} else {
				if (opcode)
					offset++;
			}
		}
	} else {
		while (count < SDIO_CMD_RETRIES) {
			if (func != BCM_SDIO_FN0) {
				if (rw) {
					if (opcode)
						ret = sdio_memcpy_toio(function, offset,
								       buff, buflen);
					else
						ret = sdio_writesb(function, offset,
								   buff, buflen);
				} else {
					if (opcode)
						ret = sdio_memcpy_fromio(function, buff,
									 offset, buflen);
					else
						ret = sdio_readsb(function, buff,
								  offset, buflen);
				}
			} else {
				ret = bcm_sdio_cmd53(function, rw, offset, opcode,
						     buff, buflen,
						     BCM_SDIO_FN0_BLK_SIZE);
			}
			if (!ret)
				break;
			count++;
		}

		if (count)
			BCM_DEBUG_PRINT(debuglevel,
					"Count is higher than 0 for cmd53: %x\n",
					count);
		if (ret)
			BCM_DEBUG_PRINT(debuglevel,
					"FAILED IN CMD53 %d rw: %x addr: %x count: %x\n",
					ret, rw, offset, count);
	}

	//BCM_DEBUG_PRINT(debuglevel, KERN_ALERT "cmd53-fn-%d-%d: addr:x%x w:x%x sz:x%x ret:x%x\n", func, function->num, offset, rw, buflen, ret);

rel_host:
	sdio_release_host(function);
	return ret;
}
static void iwm_sdio_isr(struct sdio_func *func)
{
	struct iwm_priv *iwm;
	struct iwm_sdio_priv *hw;
	struct iwm_rx_info *rx_info;
	struct sk_buff *skb;
	unsigned long buf_size, read_size;
	int ret;
	u8 val;

	hw = sdio_get_drvdata(func);
	iwm = hw_to_iwm(hw);

	buf_size = hw->blk_size;

	/* We're checking the status */
	val = sdio_readb(func, IWM_SDIO_INTR_STATUS_ADDR, &ret);
	if (val == 0 || ret < 0) {
		IWM_ERR(iwm, "Wrong INTR_STATUS\n");
		return;
	}

	/* See if we have free buffers */
	if (skb_queue_len(&iwm->rx_list) > IWM_RX_LIST_SIZE) {
		IWM_ERR(iwm, "No buffer for more Rx frames\n");
		return;
	}

	/* We first read the transaction size */
	read_size = sdio_readb(func, IWM_SDIO_INTR_GET_SIZE_ADDR + 1, &ret);
	read_size = read_size << 8;
	if (ret < 0) {
		IWM_ERR(iwm, "Couldn't read the xfer size\n");
		return;
	}

	/* We need to clear the INT register */
	sdio_writeb(func, 1, IWM_SDIO_INTR_CLEAR_ADDR, &ret);
	if (ret < 0) {
		IWM_ERR(iwm, "Couldn't clear the INT register\n");
		return;
	}

	while (buf_size < read_size)
		buf_size <<= 1;

	skb = dev_alloc_skb(buf_size);
	if (!skb) {
		IWM_ERR(iwm, "Couldn't alloc RX skb\n");
		return;
	}

	rx_info = skb_to_rx_info(skb);
	rx_info->rx_size = read_size;
	rx_info->rx_buf_size = buf_size;

	/* Now we can read the actual buffer */
	ret = sdio_memcpy_fromio(func, skb_put(skb, read_size),
				 IWM_SDIO_DATA_ADDR, read_size);
	if (ret < 0) {
		/* Don't queue a partial or garbage read */
		IWM_ERR(iwm, "Couldn't read the Rx buffer (%d)\n", ret);
		kfree_skb(skb);
		return;
	}

	/* The skb is put on the driver-specific Rx SKB list */
	skb_queue_tail(&iwm->rx_list, skb);

	/* We can now schedule the actual worker */
	queue_work(hw->isr_wq, &hw->isr_worker);
}
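iwm_sdio_isr() can call sdio_readb()/sdio_writeb() without claiming the host because the SDIO core claims it before invoking a registered interrupt handler. A sketch of how such a handler might be registered at probe time (the wrapper name is hypothetical; sdio_claim_irq() is the standard kernel call):

/* Sketch only: hypothetical probe-time registration of the ISR above. */
static int iwm_sdio_register_isr(struct sdio_func *func)
{
	int ret;

	sdio_claim_host(func);
	ret = sdio_claim_irq(func, iwm_sdio_isr);
	sdio_release_host(func);

	return ret;
}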
uint sdbus_write_reg_int(struct intf_priv *pintfpriv, u32 addr, u32 cnt,
			 void *pdata)
{
	struct dvobj_priv *pdvobjpriv = (struct dvobj_priv *)pintfpriv->intf_dev;
	struct sdio_func *func = pdvobjpriv->func;
	int status;
#ifdef CONFIG_IO_4B
	u32 addr_org = addr, addr_offset = 0;
	u32 cnt_org = cnt;
	void *pdata_org = pdata;
#endif

	_func_enter_;

#ifdef CONFIG_IO_4B
	addr_offset = addr % 4;
	if (addr_offset) {
		addr = addr - addr_offset;
		cnt = cnt + addr_offset;
	}
	if (cnt % 4)
		cnt = ((cnt + 4) >> 2) << 2;

	if (cnt != cnt_org) {
		pdata = rtw_malloc(cnt);
		if (pdata == NULL) {
			RT_TRACE(_module_hci_ops_os_c_, _drv_emerg_,
				 ("SDIO_STATUS_NO_RESOURCES - rtw_malloc fail\n"));
			return _FAIL;
		}
		/* read-modify-write: fetch the surrounding dwords first */
		status = sdio_memcpy_fromio(func, pdata, addr & 0x1FFFF, cnt);
		if (status) {
			RT_TRACE(_module_hci_ops_os_c_, _drv_emerg_,
				 ("sdbus_write_reg_int read failed 0x%x\n "
				  "***** Addr = %x *****\n"
				  "***** Length = %d *****\n",
				  status, addr, cnt));
			rtw_mfree(pdata, cnt);
			return _FAIL;
		}
		_rtw_memcpy(pdata + addr_offset, pdata_org, cnt_org);
		/*
		 * If the data is modified between this read and the write
		 * below, it may cause a problem.
		 */
	}
#endif

	status = sdio_memcpy_toio(func, addr & 0x1FFFF, pdata, cnt);
	if (status) {
		/* error */
		RT_TRACE(_module_hci_ops_os_c_, _drv_emerg_,
			 ("sdbus_write_reg_int failed 0x%x\n"
			  "***** Addr = %x *****\n"
			  "***** Length = %d *****\n",
			  status, addr, cnt));
		status = _FAIL;
	} else {
		status = _SUCCESS;
	}

#ifdef CONFIG_IO_4B
	if (cnt != cnt_org)
		rtw_mfree(pdata, cnt);
#endif

	_func_exit_;

	return status;
}
static int brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev,
				      uint fix_inc, uint write, uint func,
				      uint addr, struct sk_buff *pkt)
{
	bool fifo = (fix_inc == SDIOH_DATA_FIX);
	u32 SGCount = 0;
	int err_ret = 0;
	struct sk_buff *pnext;

	brcmf_dbg(TRACE, "Enter\n");

	brcmf_pm_resume_wait(sdiodev, &sdiodev->request_packet_wait);
	if (brcmf_pm_resume_error(sdiodev))
		return -EIO;

	/* Claim host controller */
	sdio_claim_host(sdiodev->func[func]);
	for (pnext = pkt; pnext; pnext = pnext->next) {
		uint pkt_len = pnext->len;

		pkt_len += 3;
		pkt_len &= 0xFFFFFFFC;

		/*
		 * Note: as written, fixed-address (fifo) writes take the same
		 * sdio_memcpy_toio() path as incrementing writes.
		 */
		if ((write) && (!fifo)) {
			err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
						   ((u8 *)(pnext->data)),
						   pkt_len);
		} else if (write) {
			err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
						   ((u8 *)(pnext->data)),
						   pkt_len);
		} else if (fifo) {
			err_ret = sdio_readsb(sdiodev->func[func],
					      ((u8 *)(pnext->data)),
					      addr, pkt_len);
		} else {
			err_ret = sdio_memcpy_fromio(sdiodev->func[func],
						     ((u8 *)(pnext->data)),
						     addr, pkt_len);
		}

		if (err_ret)
			brcmf_dbg(ERROR, "%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
				  write ? "TX" : "RX", pnext, SGCount, addr,
				  pkt_len, err_ret);
		else
			brcmf_dbg(TRACE, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n",
				  write ? "TX" : "RX", pnext, SGCount, addr,
				  pkt_len);

		if (!fifo)
			addr += pkt_len;
		SGCount++;
	}

	/* Release host controller */
	sdio_release_host(sdiodev->func[func]);

	brcmf_dbg(TRACE, "Exit\n");
	return err_ret;
}
static int cw1200_sdio_memcpy_fromio(struct hwbus_priv *self,
				     unsigned int addr,
				     void *dst, int count)
{
	return sdio_memcpy_fromio(self->func, dst, addr, count);
}
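cw1200_sdio_memcpy_fromio() is a thin read-side wrapper; a symmetric write-side sketch (assumed here, not copied from the driver) would mirror it with sdio_memcpy_toio(), whose argument order is (func, addr, src, count):

/* Assumed symmetric write-side sketch, not taken from the driver. */
static int cw1200_sdio_memcpy_toio(struct hwbus_priv *self,
				   unsigned int addr,
				   const void *src, int count)
{
	/* sdio_memcpy_toio() takes a non-const source pointer */
	return sdio_memcpy_toio(self->func, addr, (void *)src, count);
}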
static void smssdio_interrupt(struct sdio_func *func)
{
	int ret, isr;
	struct smssdio_device *smsdev;
	struct smscore_buffer_t *cb;
	struct SmsMsgHdr_ST *hdr;
	size_t size;

	smsdev = sdio_get_drvdata(func);

	/*
	 * The interrupt register has no defined meaning. It is just
	 * a way of turning off the level triggered interrupt.
	 */
	isr = sdio_readb(func, SMSSDIO_INT, &ret);
	if (ret) {
		sms_err("Unable to read interrupt register!\n");
		return;
	}

	if (smsdev->split_cb == NULL) {
		cb = smscore_getbuffer(smsdev->coredev);
		if (!cb) {
			sms_err("Unable to allocate data buffer!\n");
			return;
		}

		ret = sdio_memcpy_fromio(smsdev->func, cb->p, SMSSDIO_DATA,
					 SMSSDIO_BLOCK_SIZE);
		if (ret) {
			sms_err("Error %d reading initial block!\n", ret);
			return;
		}

		hdr = cb->p;

		if (hdr->msgFlags & MSG_HDR_FLAG_SPLIT_MSG) {
			smsdev->split_cb = cb;
			return;
		}

		if (hdr->msgLength > smsdev->func->cur_blksize)
			size = hdr->msgLength - smsdev->func->cur_blksize;
		else
			size = 0;
	} else {
		cb = smsdev->split_cb;
		hdr = cb->p;

		size = hdr->msgLength - sizeof(struct SmsMsgHdr_ST);

		smsdev->split_cb = NULL;
	}

	if (size) {
		void *buffer;

		buffer = cb->p + (hdr->msgLength - size);
		size = ALIGN(size, SMSSDIO_BLOCK_SIZE);

		BUG_ON(smsdev->func->cur_blksize != SMSSDIO_BLOCK_SIZE);

		/*
		 * First attempt to transfer all of it in one go...
		 */
		ret = sdio_memcpy_fromio(smsdev->func, buffer,
					 SMSSDIO_DATA, size);
		if (ret && ret != -EINVAL) {
			smscore_putbuffer(smsdev->coredev, cb);
			sms_err("Error %d reading data from card!\n", ret);
			return;
		}

		/*
		 * ...then fall back to one block at a time if that is
		 * not possible...
		 *
		 * (we have to do this manually because of the
		 * problem with the "increase address" bit)
		 */
		if (ret == -EINVAL) {
			while (size) {
				ret = sdio_memcpy_fromio(smsdev->func,
							 buffer, SMSSDIO_DATA,
							 smsdev->func->cur_blksize);
				if (ret) {
					smscore_putbuffer(smsdev->coredev, cb);
					sms_err("Error %d reading data from card!\n",
						ret);
					return;
				}

				buffer += smsdev->func->cur_blksize;

				if (size > smsdev->func->cur_blksize)
					size -= smsdev->func->cur_blksize;
				else
					size = 0;
			}
		}
	}

	cb->size = hdr->msgLength;
	cb->offset = 0;

	smscore_onresponse(smsdev->coredev, cb);
}
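Both Siano handlers above repeat the same pattern: try the whole payload in one sdio_memcpy_fromio(), then fall back to one block at a time when the host controller rejects the large request with -EINVAL. A factored-out sketch of that fallback (the helper name and signature are assumptions; the payload is expected to be block-aligned, as in the callers):

/* Sketch of the shared fallback pattern; not part of the driver API. */
static int smssdio_read_payload(struct sdio_func *func, void *buffer,
				size_t size)
{
	int ret;

	/* First attempt the whole transfer in one go... */
	ret = sdio_memcpy_fromio(func, buffer, SMSSDIO_DATA, size);
	if (ret != -EINVAL)
		return ret;

	/* ...then retry one block at a time, as the handlers above do. */
	while (size) {
		ret = sdio_memcpy_fromio(func, buffer, SMSSDIO_DATA,
					 func->cur_blksize);
		if (ret)
			return ret;

		buffer += func->cur_blksize;
		if (size > func->cur_blksize)
			size -= func->cur_blksize;
		else
			size = 0;
	}

	return 0;
}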
static void i2400ms_rx(struct i2400ms *i2400ms)
{
	int ret;
	struct sdio_func *func = i2400ms->func;
	struct device *dev = &func->dev;
	struct i2400m *i2400m = &i2400ms->i2400m;
	struct sk_buff *skb;
	ssize_t rx_size;

	d_fnstart(7, dev, "(i2400ms %p)\n", i2400ms);

	rx_size = __i2400ms_rx_get_size(i2400ms);
	if (rx_size < 0) {
		ret = rx_size;
		goto error_get_size;
	}

	sdio_writeb(func, 1, I2400MS_INTR_CLEAR_ADDR, &ret);

	ret = -ENOMEM;
	skb = alloc_skb(rx_size, GFP_ATOMIC);
	if (NULL == skb) {
		dev_err(dev, "RX: unable to alloc skb\n");
		goto error_alloc_skb;
	}

	ret = sdio_memcpy_fromio(func, skb->data, I2400MS_DATA_ADDR, rx_size);
	if (ret < 0) {
		dev_err(dev, "RX: SDIO data read failed: %d\n", ret);
		goto error_memcpy_fromio;
	}

	rmb();

	if (unlikely(i2400m->boot_mode == 1)) {
		spin_lock(&i2400m->rx_lock);
		i2400ms->bm_ack_size = rx_size;
		spin_unlock(&i2400m->rx_lock);
		memcpy(i2400m->bm_ack_buf, skb->data, rx_size);
		wake_up(&i2400ms->bm_wfa_wq);
		d_printf(5, dev, "RX: SDIO boot mode message\n");
		kfree_skb(skb);
		goto out;
	}

	ret = -EIO;
	if (unlikely(rx_size < sizeof(__le32))) {
		dev_err(dev, "HW BUG? only %zu bytes received\n", rx_size);
		goto error_bad_size;
	}

	if (likely(i2400m_is_d2h_barker(skb->data))) {
		skb_put(skb, rx_size);
		i2400m_rx(i2400m, skb);
	} else if (unlikely(i2400m_is_boot_barker(i2400m, skb->data,
						  rx_size))) {
		ret = i2400m_dev_reset_handle(i2400m, "device rebooted");
		dev_err(dev, "RX: SDIO reboot barker\n");
		kfree_skb(skb);
	} else {
		i2400m_unknown_barker(i2400m, skb->data, rx_size);
		kfree_skb(skb);
	}
out:
	d_fnend(7, dev, "(i2400ms %p) = void\n", i2400ms);
	return;

error_memcpy_fromio:
	kfree_skb(skb);
error_alloc_skb:
error_get_size:
error_bad_size:
	d_fnend(7, dev, "(i2400ms %p) = %d\n", i2400ms, ret);
}
int sif_io_sync(struct esp_pub *epub, u32 addr, u8 *buf, u32 len, u32 flag)
{
	int err = 0;
	u8 *ibuf = NULL;
	bool need_ibuf = false;
	struct esp_sdio_ctrl *sctrl = NULL;
	struct sdio_func *func = NULL;

	if (epub == NULL || buf == NULL) {
		ESSERT(0);
		err = -EINVAL;
		goto _exit;
	}

	sctrl = (struct esp_sdio_ctrl *)epub->sif;
	func = sctrl->func;
	if (func == NULL) {
		ESSERT(0);
		err = -EINVAL;
		goto _exit;
	}

	if (bad_buf(buf)) {
		esp_dbg(ESP_DBG_TRACE, "%s dst 0x%08x, len %d badbuf\n",
			__func__, addr, len);
		need_ibuf = true;
		ibuf = sctrl->dma_buffer;
	} else {
		ibuf = buf;
	}

	if (flag & SIF_BLOCK_BASIS) {
		/* round up for block data transaction */
	}

	if (flag & SIF_TO_DEVICE) {
		esp_dbg(ESP_DBG_TRACE, "%s to addr 0x%08x, len %d \n",
			__func__, addr, len);

		if (need_ibuf)
			memcpy(ibuf, buf, len);

		sdio_claim_host(func);
		if (flag & SIF_FIXED_ADDR)
			err = sdio_writesb(func, addr, ibuf, len);
		else if (flag & SIF_INC_ADDR)
			err = sdio_memcpy_toio(func, addr, ibuf, len);
		sif_platform_check_r1_ready(epub);
		sdio_release_host(func);
	} else if (flag & SIF_FROM_DEVICE) {
		esp_dbg(ESP_DBG_TRACE, "%s from addr 0x%08x, len %d \n",
			__func__, addr, len);

		sdio_claim_host(func);
		if (flag & SIF_FIXED_ADDR)
			err = sdio_readsb(func, ibuf, addr, len);
		else if (flag & SIF_INC_ADDR)
			err = sdio_memcpy_fromio(func, ibuf, addr, len);
		sdio_release_host(func);

		if (!err && need_ibuf)
			memcpy(buf, ibuf, len);
	}

_exit:
	return err;
}
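The SIF_BLOCK_BASIS branch in sif_io_sync() is empty apart from its comment; a hedged sketch of the round-up it hints at follows (the helper name is hypothetical, and whether the hardware actually expects padded lengths is an assumption):

/* Hypothetical sketch of rounding a transfer up to whole SDIO blocks. */
static u32 sif_round_to_block(struct sdio_func *func, u32 len)
{
	return ALIGN(len, func->cur_blksize);
}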
void adapter_sdio_rx_worker(struct work_struct *work)
{
	struct net_adapter *adapter;
	struct net_device *net;
	int err = 1;
	u_int len = 0;
	u_int remained_len = 0;
	int nReadIdx;
	u32 t_len;
	u32 t_index;
	u32 t_size;
	u8 *t_buff;

	adapter = container_of(work, struct net_adapter, receive_work);
	if (unlikely(!adapter)) {
		dump_debug("adapter points to NULL !!!!");
		return;
	}

	net = adapter->net;
	if (unlikely(!netif_device_present(net))) {
		dump_debug("!device_present!");
		return;
	}

	sdio_claim_host(adapter->func);

	hwSdioReadBankIndex(adapter, &nReadIdx, &err);
	if (err || (nReadIdx < 0)) {
		dump_debug("%s :Invalid Read Index !!!", __func__);
		sdio_release_host(adapter->func);
		return;
	}

	/* read received byte count */
	hwSdioReadCounter(adapter, &len, &nReadIdx, &err);
	if (unlikely(err || (!len))) {
		dump_debug("!hwSdioReadCounter in adapter_sdio_rx_worker!");
		adapter->halted = TRUE;
		sdio_release_host(adapter->func);
		return;
	}

	if (unlikely(len > SDIO_BUFFER_SIZE)) {
		dump_debug("ERROR RECV length (%d) > SDIO_BUFFER_SIZE", len);
		len = SDIO_BUFFER_SIZE;
	}

	sdio_writeb(adapter->func, (nReadIdx + 1) % 16, SDIO_C2H_RP_REG, NULL);

	/* leave some space to copy the ethernet header */
	t_len = len;
	t_index = (SDIO_RX_BANK_ADDR + (SDIO_BANK_SIZE * nReadIdx) + 4);
	t_buff = (u8 *)adapter->hw.receive_buffer + HEADER_MANIPULATION_OFFSET;

	while (t_len) {
		t_size = (t_len > 512) ? (512) : t_len;
		err = sdio_memcpy_fromio(adapter->func, (void *)t_buff,
					 t_index, t_size);
		t_len -= t_size;
		t_buff += t_size;
		t_index += t_size;
	}

	if (unlikely(err || (!len))) {
		dump_debug("adapter_sdio_rx_worker: error in receiving packet!! drop the packet err = %d, len = %d",
			   err, len);
		adapter->netstats.rx_errors++;
	}
static struct buffer_descriptor *rx_packet(struct net_adapter *adapter)
{
	int ret = 0;
	int read_idx;
	struct buffer_descriptor *bufdsc;
	s32 t_len;
	s32 t_index;
	s32 t_size;
	u8 *t_buff;

	read_idx = sdio_readb(adapter->func, SDIO_C2H_RP_REG, &ret);

	bufdsc = kmalloc(sizeof(*bufdsc), GFP_KERNEL);
	if (unlikely(!bufdsc)) {
		pr_err("%s bufdsc alloc fail", __func__);
		return NULL;
	}

	if (unlikely(ret)) {
		pr_err("%s sdio_readb error", __func__);
		schedule_work(&adapter->wimax_reset);
		goto err;
	}

#if 0
	/* check modem buffer overflow */
	if (read_idx == sdio_readb(adapter->func, SDIO_C2H_WP_REG, &ret)) {
		read_idx = -1;
		goto err;
	}
#endif

#ifdef CMC7xx_MULTIPACKET_SUPPORT
	if (adapter->download_complete)
		t_len = sdio_readl(adapter->func,
				   (SDIO_RX_BANK_ADDR +
				    (read_idx * SDIO_RXBANK_SIZE)), &ret);
	else
#endif
		t_len = sdio_readl(adapter->func,
				   (SDIO_RX_BANK_ADDR +
				    (read_idx * SDIO_BANK_SIZE)), &ret);

	if (unlikely(ret)) {
		pr_err("%s sdio_readl error", __func__);
		schedule_work(&adapter->wimax_reset);
		goto err;
	}

#ifdef CMC7xx_MULTIPACKET_SUPPORT
	if (adapter->download_complete) {
		if (unlikely(t_len > (SDIO_RXBANK_SIZE -
				      CMC732_PACKET_LENGTH_SIZE))) {
			pr_err("%s length out of bound", __func__);
			t_len = SDIO_RXBANK_SIZE - CMC732_PACKET_LENGTH_SIZE;
		}
		sdio_writeb(adapter->func,
			    (read_idx + 1) % SDIO_RXBANK_COUNT,
			    SDIO_C2H_RP_REG, NULL);
	} else
#endif
	{
		if (unlikely(t_len > (SDIO_BANK_SIZE -
				      CMC732_PACKET_LENGTH_SIZE))) {
			pr_err("%s length out of bound", __func__);
			t_len = SDIO_BANK_SIZE - CMC732_PACKET_LENGTH_SIZE;
		}
		sdio_writeb(adapter->func, (read_idx + 1) % 16,
			    SDIO_C2H_RP_REG, NULL);
	}

	bufdsc->buffer = kmalloc(t_len, GFP_KERNEL);
	if (unlikely(!bufdsc->buffer)) {
		pr_err("%s bufdsc->buffer alloc fail", __func__);
		goto err;
	}

	bufdsc->length = (s32)t_len;
	t_buff = (u8 *)bufdsc->buffer;

#ifdef RX_SINGLE_BLOCK_MODE
#ifdef CMC7xx_MULTIPACKET_SUPPORT
	if (adapter->download_complete)
		t_index = (SDIO_RX_BANK_ADDR +
			   (SDIO_RXBANK_SIZE * read_idx) + 4);
	else
#endif
		t_index = (SDIO_RX_BANK_ADDR +
			   (SDIO_BANK_SIZE * read_idx) + 4);

	while (likely(t_len)) {
		t_size = (t_len > CMC_BLOCK_SIZE) ? (CMC_BLOCK_SIZE) : t_len;
		ret = sdio_memcpy_fromio(adapter->func, (void *)t_buff,
					 t_index, t_size);
		if (unlikely(ret)) {
			pr_err("%s sdio_memcpy_fromio fail\n", __func__);
			schedule_work(&adapter->wimax_reset);
			goto err_2;
		}
		t_len -= t_size;
		t_buff += t_size;
		t_index += t_size;
	}
#else
	ret = sdio_memcpy_fromio(adapter->func, (void *)t_buff,
				 t_index, t_len);
	if (unlikely(ret)) {
		pr_err("%s sdio_memcpy_fromio fail", __func__);
		schedule_work(&adapter->wimax_reset);
		goto err_2;
	}
#endif

	return bufdsc;

err_2:
	kfree(bufdsc->buffer);
err:
	kfree(bufdsc);
	adapter->netstats.rx_dropped++;
	return NULL;
}