/**
 *  @brief This function initialize the rx URBs and submit them
 *
 *  @param handle   Pointer to moal_handle structure
 *
 *  @return         MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
 */
mlan_status
woal_usb_rx_init(moal_handle * handle)
{
    struct usb_card_rec *cardp = (struct usb_card_rec *) handle->card;
    int i;
    mlan_status ret = MLAN_STATUS_SUCCESS;

    ENTER();

    cardp->rx_cmd.handle = handle;
    cardp->rx_cmd.ep = cardp->rx_cmd_ep;
    /* Allocate URB for command/event */
    if (!(cardp->rx_cmd.urb = usb_alloc_urb(0, GFP_ATOMIC))) {
        PRINTM(ERROR, "Rx command URB allocation failed\n");
        ret = MLAN_STATUS_FAILURE;
        goto init_exit;
    }

    /* Allocate the command/event buffer. The old code silently skipped
       submission on allocation failure and still returned success, leaving
       the command/event Rx path with no URB queued — treat it as fatal. */
    if (!(cardp->rx_cmd.pmbuf = woal_alloc_mlan_buffer(MLAN_RX_CMD_BUF_SIZE))) {
        PRINTM(ERROR, "Rx command buffer allocation failed\n");
        ret = MLAN_STATUS_FAILURE;
        goto init_exit;
    }
    /* Submit Rx command URB */
    if (woal_usb_submit_rx_urb(&cardp->rx_cmd, MLAN_RX_CMD_BUF_SIZE) < 0) {
        ret = MLAN_STATUS_FAILURE;
        goto init_exit;
    }

    for (i = 0; i < MVUSB_RX_DATA_URB; i++) {
        cardp->rx_data_list[i].handle = handle;
        cardp->rx_data_list[i].ep = cardp->rx_data_ep;
        /* Allocate URB for data */
        if (!(cardp->rx_data_list[i].urb = usb_alloc_urb(0, GFP_ATOMIC))) {
            PRINTM(ERROR, "Rx data URB allocation failed\n");
            ret = MLAN_STATUS_FAILURE;
            goto init_exit;
        }
        /* Submit Rx data URB; the data buffer is allocated inside
           woal_usb_submit_rx_urb for data endpoints */
        if (woal_usb_submit_rx_urb
            (&cardp->rx_data_list[i], MLAN_RX_DATA_BUF_SIZE) < 0) {
            ret = MLAN_STATUS_FAILURE;
            goto init_exit;
        }
    }

  init_exit:
    LEAVE();
    return ret;
}
/**
 *  @brief This function sets up the data to receive
 *
 *  @param ctx      Pointer to urb_context structure
 *  @param size     Skb size
 *
 *  @return         MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
 */
static mlan_status
woal_usb_submit_rx_urb(urb_context * ctx, int size)
{
    moal_handle *handle = ctx->handle;
    struct usb_card_rec *cardp = (struct usb_card_rec *) handle->card;
    int err;
    mlan_status ret = MLAN_STATUS_FAILURE;

    ENTER();

    if (cardp->rx_cmd_ep != ctx->ep) {
        /* Data endpoint: allocate a fresh buffer per submission. The
           command/event buffer (rx_cmd_ep) is allocated by the caller. */
        if (!(ctx->pmbuf = woal_alloc_mlan_buffer(handle, size))) {
            PRINTM(MERROR, "Fail to submit Rx URB due to no memory/skb\n");
            goto rx_ret;
        }
    }
    /* Reserve headroom for IP alignment and the Rx header */
    ctx->pmbuf->data_offset = MLAN_NET_IP_ALIGN + MLAN_RX_HEADER_LEN;
    usb_fill_bulk_urb(ctx->urb, cardp->udev,
                      usb_rcvbulkpipe(cardp->udev, ctx->ep),
                      ctx->pmbuf->pbuf + ctx->pmbuf->data_offset,
                      size - ctx->pmbuf->data_offset,
                      woal_usb_receive, (void *) ctx);

    /* Account for the in-flight URB before submission so the completion
       handler sees a consistent pending count */
    if (cardp->rx_cmd_ep == ctx->ep)
        atomic_inc(&cardp->rx_cmd_urb_pending);
    else
        atomic_inc(&cardp->rx_data_urb_pending);

    if ((err = usb_submit_urb(ctx->urb, GFP_ATOMIC))) {
        /* Submit URB failure: log the actual USB error code. The old code
           printed the stale mlan_status instead of usb_submit_urb's result. */
        PRINTM(MERROR, "Submit Rx URB failed: %d\n", err);
        woal_free_mlan_buffer(handle, ctx->pmbuf);
        if (cardp->rx_cmd_ep == ctx->ep)
            atomic_dec(&cardp->rx_cmd_urb_pending);
        else
            atomic_dec(&cardp->rx_data_urb_pending);
        ctx->pmbuf = NULL;
        ret = MLAN_STATUS_FAILURE;
    } else {
        ret = MLAN_STATUS_SUCCESS;
    }

  rx_ret:
    LEAVE();
    return ret;
}
/**
 *  @brief This function sets up the data to receive
 *
 *  @param ctx      Pointer to urb_context structure
 *  @param size     Skb size
 *
 *  @return         MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
 */
static mlan_status
woal_usb_submit_rx_urb(urb_context * ctx, int size)
{
    moal_handle *handle = ctx->handle;
    struct usb_card_rec *cardp = (struct usb_card_rec *) handle->card;
    struct sk_buff *skb = NULL;
    int err;
    mlan_status ret = MLAN_STATUS_FAILURE;

    ENTER();

    if (cardp->rx_cmd_ep != ctx->ep) {
        /* Data endpoint: allocate a fresh buffer per submission. The
           command/event buffer (rx_cmd_ep) is allocated by the caller. */
        if (!(ctx->pmbuf = woal_alloc_mlan_buffer(size))) {
            PRINTM(ERROR, "No free skb\n");
            goto rx_ret;
        }
    }
    skb = (struct sk_buff *) ctx->pmbuf->pdesc;
    /* Reserve headroom so the IP header lands aligned */
    ctx->pmbuf->data_offset = MLAN_NET_IP_ALIGN;
    usb_fill_bulk_urb(ctx->urb, cardp->udev,
                      usb_rcvbulkpipe(cardp->udev, ctx->ep),
                      skb->tail + MLAN_NET_IP_ALIGN,
                      size - MLAN_NET_IP_ALIGN,
                      woal_usb_receive, (void *) ctx);

    /* Account for the in-flight URB before submission so the completion
       handler sees a consistent pending count */
    if (cardp->rx_cmd_ep == ctx->ep)
        atomic_inc(&cardp->rx_cmd_urb_pending);
    else
        atomic_inc(&cardp->rx_data_urb_pending);

    if ((err = usb_submit_urb(ctx->urb, GFP_ATOMIC))) {
        /* Submit URB failure */
        PRINTM(ERROR, "Submit Rx URB failed: %d\n", err);
        woal_free_mlan_buffer(ctx->pmbuf);
        if (cardp->rx_cmd_ep == ctx->ep)
            atomic_dec(&cardp->rx_cmd_urb_pending);
        else
            atomic_dec(&cardp->rx_data_urb_pending);
        ctx->pmbuf = NULL;
        /* Normalize to the documented status instead of leaking the raw
           negative errno from usb_submit_urb through mlan_status (the old
           code returned that errno unchanged). */
        ret = MLAN_STATUS_FAILURE;
    } else {
        ret = MLAN_STATUS_SUCCESS;
    }

  rx_ret:
    LEAVE();
    return ret;
}
/**
 *  @brief Handle resume
 *
 *  Resubmits Rx URBs, restores the carrier on connected interfaces and
 *  cancels host sleep. Always reports success so the USB core completes
 *  the resume even if the card state is unexpected.
 *
 *  @param intf     Pointer to usb_interface
 *
 *  @return         MLAN_STATUS_SUCCESS
 */
static int
woal_usb_resume(struct usb_interface *intf)
{
    struct usb_card_rec *cardp = usb_get_intfdata(intf);
    moal_handle *handle = NULL;
    int i;

    ENTER();

    if (!cardp || !cardp->phandle) {
        PRINTM(MERROR, "Card or adapter structure is not valid\n");
        LEAVE();
        return MLAN_STATUS_SUCCESS;
    }
    handle = cardp->phandle;

    /* Nothing to do if we never suspended (e.g. spurious resume callback) */
    if (handle->is_suspended == MFALSE) {
        PRINTM(MWARN, "Device already resumed\n");
        LEAVE();
        return MLAN_STATUS_SUCCESS;
    }

    /* Indicate device resumed. The netdev queue will be resumed only after
       the urbs have been resubmitted */
    handle->is_suspended = MFALSE;

    if (!atomic_read(&cardp->rx_data_urb_pending)) {
        /* Submit multiple Rx data URBs */
        woal_usb_submit_rx_data_urbs(handle);
    }
    if (!atomic_read(&cardp->rx_cmd_urb_pending)) {
        /* NOTE(review): if this allocation fails the command/event Rx path
           is left without a queued URB and no error is reported — presumably
           tolerated on resume; confirm against the init path. */
        if ((cardp->rx_cmd.pmbuf =
             woal_alloc_mlan_buffer(handle, MLAN_RX_CMD_BUF_SIZE)))
            woal_usb_submit_rx_urb(&cardp->rx_cmd, MLAN_RX_CMD_BUF_SIZE);
    }

    /* Bring the carrier back up on every interface that was associated */
    for (i = 0; i < handle->priv_num; i++)
        if (handle->priv[i]->media_connected == MTRUE)
            netif_carrier_on(handle->priv[i]->netdev);

    /* Disable Host Sleep */
    if (handle->hs_activated)
        woal_cancel_hs(woal_get_priv(handle, MLAN_BSS_ROLE_ANY),
                       MOAL_NO_WAIT);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
#ifdef CONFIG_PM
    /* Resume handler may be called due to remote wakeup, force to exit
       suspend anyway */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
    cardp->udev->autosuspend_disabled = 1;
#else
    cardp->udev->dev.power.runtime_auto = 0;
#endif /* < 2.6.35 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
    cardp->udev->autoresume_disabled = 0;
#endif /* < 2.6.33 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)
    /* Pin the runtime-PM usage count so the device is not autosuspended
       again right after resume */
    atomic_inc(&(cardp->udev)->dev.power.usage_count);
#endif /* >= 2.6.34 */
#endif /* CONFIG_PM */
#endif /* >= 2.6.24 */

    LEAVE();
    return MLAN_STATUS_SUCCESS;
}