static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo) { s8 *fwname; const struct firmware *fw; int err; devinfo->image = g_image.data; devinfo->image_len = g_image.len; /* * if we have an image we can leave here. */ if (devinfo->image) return 0; fwname = BRCMF_USB_43236_FW_NAME; err = request_firmware(&fw, fwname, devinfo->dev); if (!fw) { brcmf_dbg(ERROR, "fail to request firmware %s\n", fwname); return err; } if (check_file(fw->data) < 0) { brcmf_dbg(ERROR, "invalid firmware %s\n", fwname); return -EINVAL; } devinfo->image = vmalloc(fw->size); /* plus nvram */ if (!devinfo->image) return -ENOMEM; memcpy(devinfo->image, fw->data, fw->size); devinfo->image_len = fw->size; release_firmware(fw); return 0; }
/* Second probe phase: runs once the firmware image has been fetched. */
static void brcmf_usb_probe_phase2(struct device *dev,
				   const struct firmware *fw,
				   void *nvram, u32 nvlen)
{
	struct brcmf_bus *bus = dev_get_drvdata(dev);
	struct brcmf_usbdev_info *devinfo = bus->bus_priv.usb->devinfo;
	int ret;

	brcmf_dbg(USB, "Start fw downloading\n");

	/* reject images that do not carry a valid TRX header */
	ret = check_file(fw->data);
	if (ret < 0) {
		brcmf_err("invalid firmware\n");
		release_firmware(fw);
		goto error;
	}

	devinfo->image = fw->data;
	devinfo->image_len = fw->size;

	/* firmware object is no longer needed once the download ran */
	ret = brcmf_usb_fw_download(devinfo);
	release_firmware(fw);
	if (ret == 0)
		ret = brcmf_usb_bus_setup(devinfo);
	if (ret)
		goto error;

	mutex_unlock(&devinfo->dev_init_lock);
	return;

error:
	brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
	mutex_unlock(&devinfo->dev_init_lock);
	device_release_driver(dev);
}
static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb) { struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); struct brcmf_usbreq *req; int ret; unsigned long flags; brcmf_dbg(USB, "Enter, skb=%p\n", skb); if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) { ret = -EIO; goto fail; } req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq, &devinfo->tx_freecount); if (!req) { brcmf_err("no req to send\n"); ret = -ENOMEM; goto fail; } req->skb = skb; req->devinfo = devinfo; usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->tx_pipe, skb->data, skb->len, brcmf_usb_tx_complete, req); req->urb->transfer_flags |= URB_ZERO_PACKET; brcmf_usb_enq(devinfo, &devinfo->tx_postq, req, NULL); ret = usb_submit_urb(req->urb, GFP_ATOMIC); if (ret) { brcmf_err("brcmf_usb_tx usb_submit_urb FAILED\n"); brcmf_usb_del_fromq(devinfo, req); req->skb = NULL; brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount); goto fail; } spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags); if (devinfo->tx_freecount < devinfo->tx_low_watermark && !devinfo->tx_flowblock) { brcmf_txflowblock(dev, true); devinfo->tx_flowblock = true; } spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags); return 0; fail: return ret; }
/* Enable or disable ARP and ND (neighbor discovery) offload in firmware. */
void brcmf_configure_arp_nd_offload(struct brcmf_if *ifp, bool enable)
{
	u32 mode = enable ? (BRCMF_ARP_OL_AGENT |
			     BRCMF_ARP_OL_PEER_AUTO_REPLY) : 0;
	s32 err;

	/* Try to set and enable ARP offload feature, this may fail, then it
	 * is simply not supported and err 0 will be returned
	 */
	err = brcmf_fil_iovar_int_set(ifp, "arp_ol", mode);
	if (err) {
		brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, err = %d\n",
			  mode, err);
	} else {
		err = brcmf_fil_iovar_int_set(ifp, "arpoe", enable);
		if (err)
			brcmf_dbg(TRACE, "failed to configure (%d) ARP offload err = %d\n",
				  enable, err);
		else
			brcmf_dbg(TRACE, "successfully configured (%d) ARP offload to 0x%x\n",
				  enable, mode);
	}

	/* ND offload is configured independently of the ARP result */
	err = brcmf_fil_iovar_int_set(ifp, "ndoe", enable);
	if (err)
		brcmf_dbg(TRACE, "failed to configure (%d) ND offload err = %d\n",
			  enable, err);
	else
		brcmf_dbg(TRACE, "successfully configured (%d) ND offload to 0x%x\n",
			  enable, mode);
}
static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb) { struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); struct brcmf_usbreq *req; int ret; if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) { /* TODO: handle suspend/resume */ return -EIO; } req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq); if (!req) { brcmf_dbg(ERROR, "no req to send\n"); return -ENOMEM; } if (!req->urb) { brcmf_dbg(ERROR, "no urb for req %p\n", req); return -ENOBUFS; } req->skb = skb; req->devinfo = devinfo; usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->tx_pipe, skb->data, skb->len, brcmf_usb_tx_complete, req); req->urb->transfer_flags |= URB_ZERO_PACKET; ret = usb_submit_urb(req->urb, GFP_ATOMIC); if (!ret) { brcmf_usb_enq(devinfo, &devinfo->tx_postq, req); } else { req->skb = NULL; brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req); } return ret; }
static void brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state) { struct brcmf_bus *bcmf_bus = devinfo->bus_pub.bus; brcmf_dbg(USB, "Enter, current state=%d, new state=%d\n", devinfo->bus_pub.state, state); if (devinfo->bus_pub.state == state) return; devinfo->bus_pub.state = state; /* update state of upper layer */ if (state == BRCMFMAC_USB_STATE_DOWN) { brcmf_dbg(USB, "DBUS is down\n"); brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_DOWN); } else if (state == BRCMFMAC_USB_STATE_UP) { brcmf_dbg(USB, "DBUS is up\n"); brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_UP); } else { brcmf_dbg(USB, "DBUS current state=%d\n", state); } }
/* Disable SDIO functions 2 and 1 on detach, in that order. */
void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev)
{
	int fn;

	brcmf_dbg(TRACE, "\n");

	/* function 2 first, then function 1 */
	for (fn = 2; fn >= 1; fn--) {
		sdio_claim_host(sdiodev->func[fn]);
		sdio_disable_func(sdiodev->func[fn]);
		sdio_release_host(sdiodev->func[fn]);
	}
}
/* Bring the USB bus down and cancel all outstanding urbs. */
static void brcmf_usb_down(struct device *dev)
{
	struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);

	brcmf_dbg(USB, "Enter\n");
	if (!devinfo)
		return;

	/* already down: nothing left to do */
	if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_DOWN)
		return;

	brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_DOWN);
	brcmf_cancel_all_urbs(devinfo);
}
/* Tear down one H2D tx flowring: free its DMA buffer, then its state. */
static void brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf,
					 u16 flowid)
{
	u32 ring_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM *
		      BRCMF_H2D_TXFLOWRING_ITEMSIZE;

	brcmf_dbg(MSGBUF, "Removing flowring %d\n", flowid);

	dma_free_coherent(msgbuf->drvr->bus_if->dev, ring_sz,
			  msgbuf->flowrings[flowid]->buf_addr,
			  msgbuf->flowring_dma_handle[flowid]);
	brcmf_flowring_delete(msgbuf->flow, flowid);
}
/* Convert user's input in hex pattern to byte-size mask */ static int brcmf_c_pattern_atoh(char *src, char *dst) { int i; if (strncmp(src, "0x", 2) != 0 && strncmp(src, "0X", 2) != 0) { brcmf_dbg(ERROR, "Mask invalid format. Needs to start with 0x\n"); return -EINVAL; } src = src + 2; /* Skip past 0x */ if (strlen(src) % 2 != 0) { brcmf_dbg(ERROR, "Mask invalid format. Length must be even.\n"); return -EINVAL; } for (i = 0; *src != '\0'; i++) { unsigned long res; char num[3]; strncpy(num, src, 2); num[2] = '\0'; if (kstrtoul(num, 16, &res)) return -EINVAL; dst[i] = (u8)res; src += 2; } return i; }
/* Read client card reg */
static int brcmf_sdioh_card_regread(struct brcmf_sdio_dev *sdiodev, int func,
				    u32 regaddr, int regsize, u32 *data)
{
	if (func == 0 || regsize == 1) {
		/* F0 registers and single-byte reads use byte access */
		u8 byte_val = 0;

		brcmf_sdioh_request_byte(sdiodev, SDIOH_READ, func, regaddr,
					 &byte_val);
		*data = byte_val & 0xff;
		brcmf_dbg(DATA, "byte read data=0x%02x\n", *data);
	} else {
		brcmf_sdioh_request_word(sdiodev, SDIOH_READ, func, regaddr,
					 data, regsize);
		/* mask down to the requested width */
		if (regsize == 2)
			*data &= 0xffff;
		brcmf_dbg(DATA, "word read data=0x%08x\n", *data);
	}
	return SUCCESS;
}
/* brcmf_pcie_reset_device() - reset the device via the chipcommon watchdog.
 *
 * Saves the link status control register, disables ASPM for the duration
 * of the reset, fires the watchdog, restores ASPM, and finally re-latches
 * a set of PCIe config registers by reading each and writing it back.
 * The exact register access order matters; do not reorder.
 */
static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
{
	/* config registers re-latched (read then written back) after reset */
	u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
			     BRCMF_PCIE_CFGREG_PM_CSR,
			     BRCMF_PCIE_CFGREG_MSI_CAP,
			     BRCMF_PCIE_CFGREG_MSI_ADDR_L,
			     BRCMF_PCIE_CFGREG_MSI_ADDR_H,
			     BRCMF_PCIE_CFGREG_MSI_DATA,
			     BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
			     BRCMF_PCIE_CFGREG_RBAR_CTRL,
			     BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
			     BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
			     BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
	u32 i;
	u32 val;
	u32 lsc;

	/* nothing to reset without an attached chip */
	if (!devinfo->ci)
		return;

	/* Disable ASPM, keeping the original value in lsc for restore */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
			       BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL);
	lsc = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
	val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, val);

	/* Watchdog reset; 100 ms settle time before touching the core */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
	WRITECC32(devinfo, watchdog, 4);
	msleep(100);

	/* Restore the saved link status control (re-enables ASPM state) */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
			       BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, lsc);

	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
				       cfg_offset[i]);
		val = brcmf_pcie_read_reg32(devinfo,
					    BRCMF_PCIE_PCIE2REG_CONFIGDATA);
		brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
			  cfg_offset[i], val);
		/* write the value straight back to re-latch it after reset */
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA,
				       val);
	}
}
/* Refresh the ring's cached write pointer from device (TCM) memory. */
static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
{
	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
	struct brcmf_pciedev_info *devinfo = ring->devinfo;
	struct brcmf_commonring *commonring;

	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
		return -EIO;

	commonring = &ring->commonring;
	commonring->w_ptr = brcmf_pcie_read_tcm16(devinfo, ring->w_idx_addr);

	brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
		  commonring->r_ptr, ring->id);

	return 0;
}
/** * brcmf_btcoex_restore_part1() - restore first step parameters. */ static void brcmf_btcoex_restore_part1(struct brcmf_btcoex_info *btci) { struct brcmf_if *ifp; if (btci->saved_regs_part1) { btci->saved_regs_part1 = false; ifp = btci->vif->ifp; brcmf_btcoex_params_write(ifp, 66, btci->reg66); brcmf_btcoex_params_write(ifp, 41, btci->reg41); brcmf_btcoex_params_write(ifp, 68, btci->reg68); brcmf_dbg(INFO, "restored btc_params regs {66,41,68} 0x%x 0x%x 0x%x\n", btci->reg66, btci->reg41, btci->reg68); } }
/** * btcmf_btcoex_save_part1() - save first step parameters. */ static void btcmf_btcoex_save_part1(struct brcmf_btcoex_info *btci) { struct brcmf_if *ifp = btci->vif->ifp; if (!btci->saved_regs_part1) { /* Retrieve and save original reg value */ brcmf_btcoex_params_read(ifp, 66, &btci->reg66); brcmf_btcoex_params_read(ifp, 41, &btci->reg41); brcmf_btcoex_params_read(ifp, 68, &btci->reg68); btci->saved_regs_part1 = true; brcmf_dbg(INFO, "saved btc_params regs (66,41,68) 0x%x 0x%x 0x%x\n", btci->reg66, btci->reg41, btci->reg68); } }
/* Issue a firmware GET command under the protocol mutex and trace the
 * returned data.
 */
s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
{
	s32 ret;

	/* serialize firmware control transactions */
	mutex_lock(&ifp->drvr->proto_block);
	ret = brcmf_fil_cmd_data(ifp, cmd, data, len, false);

	brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");

	mutex_unlock(&ifp->drvr->proto_block);
	return ret;
}
/* Release all URB and request resources held by @devinfo. */
static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
{
	brcmf_dbg(USB, "Enter, devinfo %p\n", devinfo);

	/* free the queued URBs first */
	brcmf_usb_free_q(&devinfo->rx_freeq);
	brcmf_usb_free_q(&devinfo->tx_freeq);
	usb_free_urb(devinfo->ctl_urb);
	usb_free_urb(devinfo->bulk_urb);

	/* then the request bookkeeping arrays */
	kfree(devinfo->tx_reqs);
	kfree(devinfo->rx_reqs);

	if (devinfo->settings)
		brcmf_release_module_param(devinfo->settings);
}
/* Apply bus-level tx flow control: delegate to firmware signalling when
 * it is active, otherwise stop/wake every interface queue directly.
 */
void brcmf_txflowblock(struct device *dev, bool state)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	int ifidx;

	brcmf_dbg(TRACE, "Enter\n");

	if (brcmf_fws_fc_active(drvr->fws)) {
		brcmf_fws_bus_blocked(drvr, state);
		return;
	}

	for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++)
		brcmf_txflowblock_if(drvr->iflist[ifidx],
				     BRCMF_NETIF_STOP_REASON_BLOCK_BUS,
				     state);
}
/* Wait up to @time milliseconds for a synchronous urb to complete.
 *
 * Return: 0 on success, -EINVAL on timeout or urb error.
 */
static int brcmf_usb_sync_wait(struct brcmf_usbdev_info *devinfo, u16 time)
{
	int ret;
	int err = 0;

	/* use msecs_to_jiffies() instead of the open-coded ms*HZ/1000,
	 * which truncates and can yield 0 jiffies for small timeouts
	 */
	ret = wait_event_interruptible_timeout(devinfo->wait,
					       devinfo->waitdone,
					       msecs_to_jiffies(time));
	if (!devinfo->waitdone || devinfo->sync_urb_status) {
		brcmf_dbg(ERROR, "timeout(%d) or urb err=%d\n",
			  ret, devinfo->sync_urb_status);
		err = -EINVAL;
	}
	devinfo->waitdone = false;
	return err;
}
/* Walk a request queue: either cancel the in-flight urbs (@pending) or
 * free the urbs and unlink the requests.
 */
static void brcmf_usb_free_q(struct list_head *q, bool pending)
{
	struct brcmf_usbreq *req, *next;

	list_for_each_entry_safe(req, next, q, list) {
		if (!req->urb) {
			brcmf_dbg(ERROR, "bad req\n");
			break;
		}
		if (pending) {
			/* NOTE(review): killed urbs stay on this queue;
			 * presumably their completion handler requeues
			 * them — confirm against the completion paths
			 */
			usb_kill_urb(req->urb);
		} else {
			usb_free_urb(req->urb);
			list_del_init(&req->list);
		}
	}
	/* removed: dead counter `i` that was incremented but never used */
}
/* Set or clear one netif stop reason on @ifp, stopping the queue on the
 * first active reason and waking it when the last reason clears.
 */
void brcmf_txflowblock_if(struct brcmf_if *ifp,
			  enum brcmf_netif_stop_reason reason, bool state)
{
	struct net_device *ndev;

	if (!ifp)
		return;

	brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
		  ifp->bssidx, ifp->netif_stop, reason, state);

	ndev = ifp->ndev;
	if (state) {
		/* stop the queue only on the first active reason */
		if (!ifp->netif_stop)
			netif_stop_queue(ndev);
		ifp->netif_stop |= reason;
	} else {
		ifp->netif_stop &= ~reason;
		/* wake only once every stop reason is gone */
		if (!ifp->netif_stop)
			netif_wake_queue(ndev);
	}
}
/* Stop or wake the transmit queue of every active interface. */
void brcmf_txflowblock(struct device *dev, bool state)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	int ifidx;

	brcmf_dbg(TRACE, "Enter\n");

	for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
		struct brcmf_if *ifp = drvr->iflist[ifidx];

		if (!ifp)
			continue;
		if (state)
			netif_stop_queue(ifp->ndev);
		else
			netif_wake_queue(ifp->ndev);
	}
}
/* Reflect (dis)connect state on the net_device: gate transmit with the
 * DISCONNECTED stop reason and toggle the carrier accordingly.
 */
void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on)
{
	struct net_device *ndev = ifp->ndev;

	brcmf_dbg(TRACE, "Enter, bsscfgidx=%d carrier=%d\n", ifp->bsscfgidx,
		  on);

	brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_DISCONNECTED, !on);

	/* only flip the carrier when its state actually changes */
	if (on && !netif_carrier_ok(ndev))
		netif_carrier_on(ndev);
	else if (!on && netif_carrier_ok(ndev))
		netif_carrier_off(ndev);
}
int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings) { struct brcmf_pub *drvr = NULL; int ret = 0; int i; brcmf_dbg(TRACE, "Enter\n"); /* Allocate primary brcmf_info */ drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC); if (!drvr) return -ENOMEM; for (i = 0; i < ARRAY_SIZE(drvr->if2bss); i++) drvr->if2bss[i] = BRCMF_BSSIDX_INVALID; mutex_init(&drvr->proto_block); /* Link to bus module */ drvr->hdrlen = 0; drvr->bus_if = dev_get_drvdata(dev); drvr->bus_if->drvr = drvr; drvr->settings = settings; /* attach debug facilities */ brcmf_debug_attach(drvr); /* Attach and link in the protocol */ ret = brcmf_proto_attach(drvr); if (ret != 0) { brcmf_err("brcmf_prot_attach failed\n"); goto fail; } /* attach firmware event handler */ brcmf_fweh_attach(drvr); return ret; fail: brcmf_detach(dev); return ret; }
/* brcmf_del_if() - remove an interface from the driver and tear it down.
 *
 * Unhooks the interface from the driver's tables, stops its queues/work,
 * and detaches its net_device (or notifies the p2p module when there is
 * no net_device). The teardown order is deliberate; do not reorder.
 */
static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
			 bool rtnl_locked)
{
	struct brcmf_if *ifp;

	/* unhook from the interface table before any teardown */
	ifp = drvr->iflist[bsscfgidx];
	drvr->iflist[bsscfgidx] = NULL;
	if (!ifp) {
		brcmf_err("Null interface, bsscfgidx=%d\n", bsscfgidx);
		return;
	}
	brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", bsscfgidx,
		  ifp->ifidx);
	/* clear the reverse ifidx -> bsscfgidx mapping if it points at us */
	if (drvr->if2bss[ifp->ifidx] == bsscfgidx)
		drvr->if2bss[ifp->ifidx] = BRCMF_BSSIDX_INVALID;
	if (ifp->ndev) {
		if (bsscfgidx == 0) {
			/* NOTE(review): takes rtnl_lock here regardless of
			 * @rtnl_locked — verify no caller passes
			 * rtnl_locked=true for the primary interface
			 */
			if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
				rtnl_lock();
				brcmf_netdev_stop(ifp->ndev);
				rtnl_unlock();
			}
		} else {
			netif_stop_queue(ifp->ndev);
		}

		/* flush deferred work before the net_device goes away */
		if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
			cancel_work_sync(&ifp->setmacaddr_work);
			cancel_work_sync(&ifp->multicast_work);
			cancel_work_sync(&ifp->ndoffload_work);
		}
		brcmf_net_detach(ifp->ndev, rtnl_locked);
	} else {
		/* Only p2p device interfaces which get dynamically created
		 * end up here. In this case the p2p module should be informed
		 * about the removal of the interface within the firmware. If
		 * not then p2p commands towards the firmware will cause some
		 * serious troublesome side effects. The p2p module will clean
		 * up the ifp if needed.
		 */
		brcmf_p2p_ifp_removed(ifp, rtnl_locked);
		kfree(ifp);
	}
}
static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id) { struct brcmf_bus *bus_if = dev_get_drvdata(dev_id); struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; brcmf_dbg(INTR, "OOB intr triggered\n"); /* out-of-band interrupt is level-triggered which won't * be cleared until dpc */ if (sdiodev->irq_en) { disable_irq_nosync(irq); sdiodev->irq_en = false; } brcmf_sdio_isr(sdiodev->bus); return IRQ_HANDLED; }
/* Pop the next skb from an open flowring, lifting flow control once the
 * backlog drains below the low watermark.
 */
struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid)
{
	struct brcmf_flowring_ring *ring = flow->rings[flowid];
	struct sk_buff *skb;

	if (ring->status != RING_OPEN)
		return NULL;

	skb = skb_dequeue(&ring->skblist);

	if (ring->blocked &&
	    skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW) {
		brcmf_flowring_block(flow, flowid, false);
		brcmf_dbg(MSGBUF, "Flowcontrol: OPEN for ring %d\n", flowid);
	}

	return skb;
}
static int brcmf_usb_reset_resume(struct usb_interface *intf) { struct usb_device *usb = interface_to_usbdev(intf); struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev); struct brcmf_fw_request *fwreq; int ret; brcmf_dbg(USB, "Enter\n"); fwreq = brcmf_usb_prepare_fw_request(devinfo); if (!fwreq) return -ENOMEM; ret = brcmf_fw_get_firmwares(&usb->dev, fwreq, brcmf_usb_probe_phase2); if (ret < 0) kfree(fwreq); return ret; }
static int check_file(const u8 *headers) { struct trx_header_le *trx; int actual_len = -1; brcmf_dbg(USB, "Enter\n"); /* Extract trx header */ trx = (struct trx_header_le *) headers; if (trx->magic != cpu_to_le32(TRX_MAGIC)) return -1; headers += sizeof(struct trx_header_le); if (le32_to_cpu(trx->flag_version) & TRX_UNCOMP_IMAGE) { actual_len = le32_to_cpu(trx->offsets[TRX_OFFSETS_DLFWLEN_IDX]); return actual_len + sizeof(struct trx_header_le); } return -1; }
/* Perform a 2- or 4-byte SDIO register access on functions 1+. */
int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
			     uint rw, uint func, uint addr, u32 *word,
			     uint nbytes)
{
	int err_ret = -EIO;

	/* function 0 only supports byte (CMD52) access */
	if (func == 0) {
		brcmf_err("Only CMD52 allowed to F0\n");
		return -EINVAL;
	}

	brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
		  rw, func, addr, nbytes);

	brcmf_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
	if (brcmf_pm_resume_error(sdiodev))
		return -EIO;

	if (rw) {		/* CMD52 Write */
		switch (nbytes) {
		case 4:
			sdio_writel(sdiodev->func[func], *word, addr,
				    &err_ret);
			break;
		case 2:
			sdio_writew(sdiodev->func[func], (*word & 0xFFFF),
				    addr, &err_ret);
			break;
		default:
			brcmf_err("Invalid nbytes: %d\n", nbytes);
			break;
		}
	} else {		/* CMD52 Read */
		switch (nbytes) {
		case 4:
			*word = sdio_readl(sdiodev->func[func], addr,
					   &err_ret);
			break;
		case 2:
			*word = sdio_readw(sdiodev->func[func], addr,
					   &err_ret) & 0xFFFF;
			break;
		default:
			brcmf_err("Invalid nbytes: %d\n", nbytes);
			break;
		}
	}

	if (err_ret)
		brcmf_err("Failed to %s word, Err: 0x%08x\n",
			  rw ? "write" : "read", err_ret);

	return err_ret;
}