/* * This function sends a packet to device. * * It processes the packet to add the TxPD, checks condition and * sends the processed packet to firmware for transmission. * * On successful completion, the function calls the completion callback * and logs the time. */ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb, struct mwifiex_tx_param *tx_param) { int ret = -1; struct mwifiex_adapter *adapter = priv->adapter; u8 *head_ptr; struct txpd *local_tx_pd = NULL; head_ptr = (u8 *) mwifiex_process_sta_txpd(priv, skb); if (head_ptr) { if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) local_tx_pd = (struct txpd *) (head_ptr + INTF_HEADER_LEN); ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, skb->data, skb->len, tx_param); } switch (ret) { case -EBUSY: if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && (adapter->pps_uapsd_mode) && (adapter->tx_lock_flag)) { priv->adapter->tx_lock_flag = false; local_tx_pd->flags = 0; } dev_dbg(adapter->dev, "data: -EBUSY is returned\n"); break; case -1: adapter->data_sent = false; dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n", ret); adapter->dbg.num_tx_host_to_card_failure++; mwifiex_write_data_complete(adapter, skb, ret); break; case -EINPROGRESS: adapter->data_sent = false; break; case 0: mwifiex_write_data_complete(adapter, skb, ret); break; default: break; } return ret; }
static void mwifiex_usb_cleanup_tx_aggr(struct mwifiex_adapter *adapter) { struct usb_card_rec *card = (struct usb_card_rec *)adapter->card; struct usb_tx_data_port *port; struct sk_buff *skb_tmp; int idx; for (idx = 0; idx < MWIFIEX_TX_DATA_PORT; idx++) { port = &card->port[idx]; if (adapter->bus_aggr.enable) while ((skb_tmp = skb_dequeue(&port->tx_aggr.aggr_list))) mwifiex_write_data_complete(adapter, skb_tmp, 0, -1); del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer); port->tx_aggr.timer_cnxt.is_hold_timer_set = false; port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0; } }
static void mwifiex_usb_tx_aggr_tmo(struct timer_list *t) { struct urb_context *urb_cnxt = NULL; struct sk_buff *skb_send = NULL; struct tx_aggr_tmr_cnxt *timer_context = from_timer(timer_context, t, hold_timer); struct mwifiex_adapter *adapter = timer_context->adapter; struct usb_tx_data_port *port = timer_context->port; unsigned long flags; int err = 0; spin_lock_irqsave(&port->tx_aggr_lock, flags); err = mwifiex_usb_prepare_tx_aggr_skb(adapter, port, &skb_send); if (err) { mwifiex_dbg(adapter, ERROR, "prepare tx aggr skb failed, err=%d\n", err); goto unlock; } if (atomic_read(&port->tx_data_urb_pending) >= MWIFIEX_TX_DATA_URB) { port->block_status = true; adapter->data_sent = mwifiex_usb_data_sent(adapter); err = -1; goto done; } if (port->tx_data_ix >= MWIFIEX_TX_DATA_URB) port->tx_data_ix = 0; urb_cnxt = &port->tx_data_list[port->tx_data_ix++]; err = mwifiex_usb_construct_send_urb(adapter, port, port->tx_data_ep, urb_cnxt, skb_send); done: if (err == -1) mwifiex_write_data_complete(adapter, skb_send, 0, -1); unlock: spin_unlock_irqrestore(&port->tx_aggr_lock, flags); }
static void mwifiex_usb_tx_complete(struct urb *urb) { struct urb_context *context = (struct urb_context *)(urb->context); struct mwifiex_adapter *adapter = context->adapter; struct usb_card_rec *card = adapter->card; struct usb_tx_data_port *port; int i; mwifiex_dbg(adapter, INFO, "%s: status: %d\n", __func__, urb->status); if (context->ep == card->tx_cmd_ep) { mwifiex_dbg(adapter, CMD, "%s: CMD\n", __func__); atomic_dec(&card->tx_cmd_urb_pending); adapter->cmd_sent = false; } else { mwifiex_dbg(adapter, DATA, "%s: DATA\n", __func__); mwifiex_write_data_complete(adapter, context->skb, 0, urb->status ? -1 : 0); for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++) { port = &card->port[i]; if (context->ep == port->tx_data_ep) { atomic_dec(&port->tx_data_urb_pending); port->block_status = false; break; } } adapter->data_sent = false; } if (card->mc_resync_flag) mwifiex_multi_chan_resync(adapter); mwifiex_queue_main_work(adapter); return; }
static void mwifiex_usb_tx_complete(struct urb *urb) { struct urb_context *context = (struct urb_context *)(urb->context); struct mwifiex_adapter *adapter = context->adapter; struct usb_card_rec *card = adapter->card; dev_dbg(adapter->dev, "%s: status: %d\n", __func__, urb->status); if (context->ep == card->tx_cmd_ep) { dev_dbg(adapter->dev, "%s: CMD\n", __func__); atomic_dec(&card->tx_cmd_urb_pending); adapter->cmd_sent = false; } else { dev_dbg(adapter->dev, "%s: DATA\n", __func__); atomic_dec(&card->tx_data_urb_pending); mwifiex_write_data_complete(adapter, context->skb, 0, urb->status ? -1 : 0); } queue_work(adapter->workqueue, &adapter->main_work); return; }
/*
 * This function is used to shutdown the driver.
 *
 * The following operations are performed sequentially -
 *      - Check if already shut down
 *      - Make sure the main process has stopped
 *      - Clean up the Tx and Rx queues
 *      - Delete BSS priority tables
 *      - Free the adapter
 *      - Notify completion
 *
 * Returns 0 if already shut down, -EINPROGRESS if the main process is
 * still running, otherwise the result of mwifiex_shutdown_fw_complete().
 */
int mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
{
	int ret = -EINPROGRESS;
	struct mwifiex_private *priv;
	s32 i;
	unsigned long flags;
	struct sk_buff *skb;

	/* mwifiex already shutdown */
	if (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY)
		return 0;

	adapter->hw_status = MWIFIEX_HW_STATUS_CLOSING;
	/* wait for mwifiex_process to complete */
	if (adapter->mwifiex_processing) {
		mwifiex_dbg(adapter, WARN, "main process is still running\n");
		/* caller is expected to retry; -EINPROGRESS is returned */
		return ret;
	}

	/* cancel current command */
	if (adapter->curr_cmd) {
		mwifiex_dbg(adapter, WARN, "curr_cmd is still in processing\n");
		del_timer_sync(&adapter->cmd_timer);
		mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
		adapter->curr_cmd = NULL;
	}

	/* shut down mwifiex */
	mwifiex_dbg(adapter, MSG, "info: shutdown mwifiex...\n");

	/* Clean up Tx/Rx queues and delete BSS priority table */
	for (i = 0; i < adapter->priv_num; i++) {
		if (adapter->priv[i]) {
			priv = adapter->priv[i];
			mwifiex_clean_auto_tdls(priv);
			mwifiex_abort_cac(priv);
			mwifiex_clean_txrx(priv);
			mwifiex_delete_bss_prio_tbl(priv);
		}
	}

	/* fail back any packets still pending in the TX data queue */
	atomic_set(&adapter->tx_queued, 0);
	while ((skb = skb_dequeue(&adapter->tx_data_q)))
		mwifiex_write_data_complete(adapter, skb, 0, 0);

	/* drop any received packets not yet handed to the stack; rx_pending
	 * accounting and per-BSS drop counters are updated under the lock
	 */
	spin_lock_irqsave(&adapter->rx_proc_lock, flags);
	while ((skb = skb_dequeue(&adapter->rx_data_q))) {
		struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);

		atomic_dec(&adapter->rx_pending);
		priv = adapter->priv[rx_info->bss_num];
		if (priv)
			priv->stats.rx_dropped++;
		dev_kfree_skb_any(skb);
	}
	spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);

	spin_lock(&adapter->mwifiex_lock);
	mwifiex_adapter_cleanup(adapter);
	spin_unlock(&adapter->mwifiex_lock);

	/* Notify completion */
	ret = mwifiex_shutdown_fw_complete(adapter);

	return ret;
}
/* This function prepare data packet to be send under usb tx aggregation
 * protocol, check current usb aggregation status, link packet to aggrgation
 * list if possible, work flow as below:
 * (1) if only 1 packet available, add usb tx aggregation header and send.
 * (2) if packet is able to aggregated, link it to current aggregation list.
 * (3) if packet is not able to aggregated, aggregate and send exist packets
 *     in aggrgation list. Then, link packet in the list if there is more
 *     packet in transmit queue, otherwise try to transmit single packet.
 *
 * Returns the URB submission result for an immediate send, or
 * -EINPROGRESS when the packet was queued for later aggregation.
 * NOTE(review): caller appears to be expected to hold port->tx_aggr_lock
 * (the timer path takes it before calling helpers) — confirm at call sites.
 */
static int mwifiex_usb_aggr_tx_data(struct mwifiex_adapter *adapter, u8 ep,
				    struct sk_buff *skb,
				    struct mwifiex_tx_param *tx_param,
				    struct usb_tx_data_port *port)
{
	u8 *payload, pad;
	u16 align = adapter->bus_aggr.tx_aggr_align;
	struct sk_buff *skb_send = NULL;
	struct urb_context *context = NULL;
	struct txpd *local_tx_pd =
		(struct txpd *)((u8 *)skb->data + adapter->intf_hdr_len);
	/* decision flags computed below, then acted on in order */
	u8 f_send_aggr_buf = 0;
	u8 f_send_cur_buf = 0;
	u8 f_precopy_cur_buf = 0;
	u8 f_postcopy_cur_buf = 0;
	u32 timeout;
	int ret;

	/* padding to ensure each packet alginment */
	pad = (align - (skb->len & (align - 1))) % align;

	if (tx_param && tx_param->next_pkt_len) {
		/* next packet available in tx queue*/
		if (port->tx_aggr.aggr_len + skb->len + pad >
		    adapter->bus_aggr.tx_aggr_max_size) {
			/* current packet does not fit: flush the buffer,
			 * then queue this packet into the fresh buffer
			 */
			f_send_aggr_buf = 1;
			f_postcopy_cur_buf = 1;
		} else {
			/* current packet could be aggregated*/
			f_precopy_cur_buf = 1;

			if (port->tx_aggr.aggr_len + skb->len + pad +
			    tx_param->next_pkt_len >
			    adapter->bus_aggr.tx_aggr_max_size ||
			    port->tx_aggr.aggr_num + 2 >
			    adapter->bus_aggr.tx_aggr_max_num) {
				/* next packet could not be aggregated
				 * send current aggregation buffer
				 */
				f_send_aggr_buf = 1;
			}
		}
	} else {
		/* last packet in tx queue */
		if (port->tx_aggr.aggr_num > 0) {
			/* pending packets in aggregation buffer*/
			if (port->tx_aggr.aggr_len + skb->len + pad >
			    adapter->bus_aggr.tx_aggr_max_size) {
				/* current packet not be able to aggregated,
				 * send aggr buffer first, then send packet.
				 */
				f_send_cur_buf = 1;
			} else {
				/* last packet, Aggregation and send */
				f_precopy_cur_buf = 1;
			}
			f_send_aggr_buf = 1;
		} else {
			/* no pending packets in aggregation buffer,
			 * send current packet immediately
			 */
			f_send_cur_buf = 1;
		}
	}

	if (local_tx_pd->flags & MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET) {
		/* Send NULL packet immediately*/
		if (f_precopy_cur_buf) {
			if (skb_queue_empty(&port->tx_aggr.aggr_list)) {
				/* nothing queued: bypass aggregation */
				f_precopy_cur_buf = 0;
				f_send_aggr_buf = 0;
				f_send_cur_buf = 1;
			} else {
				f_send_aggr_buf = 1;
			}
		} else if (f_postcopy_cur_buf) {
			f_send_cur_buf = 1;
			f_postcopy_cur_buf = 0;
		}
	}

	if (f_precopy_cur_buf) {
		/* link this packet into the aggregation list first */
		skb_queue_tail(&port->tx_aggr.aggr_list, skb);
		port->tx_aggr.aggr_len += (skb->len + pad);
		port->tx_aggr.aggr_num++;
		if (f_send_aggr_buf)
			goto send_aggr_buf;

		/* packet will not been send immediately,
		 * set a timer to make sure it will be sent under
		 * strict time limit. Dynamically fit the timeout
		 * value, according to packets number in aggr_list
		 */
		if (!port->tx_aggr.timer_cnxt.is_hold_timer_set) {
			port->tx_aggr.timer_cnxt.hold_tmo_msecs =
				MWIFIEX_USB_TX_AGGR_TMO_MIN;
			timeout = port->tx_aggr.timer_cnxt.hold_tmo_msecs;
			mod_timer(&port->tx_aggr.timer_cnxt.hold_timer,
				  jiffies + msecs_to_jiffies(timeout));
			port->tx_aggr.timer_cnxt.is_hold_timer_set = true;
		} else {
			if (port->tx_aggr.timer_cnxt.hold_tmo_msecs <
			    MWIFIEX_USB_TX_AGGR_TMO_MAX) {
				/* Dyanmic fit timeout */
				timeout =
				     ++port->tx_aggr.timer_cnxt.hold_tmo_msecs;
				mod_timer(&port->tx_aggr.timer_cnxt.hold_timer,
					  jiffies + msecs_to_jiffies(timeout));
			}
		}
	}

send_aggr_buf:
	if (f_send_aggr_buf) {
		/* flush the accumulated list as one aggregate skb */
		ret = mwifiex_usb_prepare_tx_aggr_skb(adapter, port, &skb_send);
		if (!ret) {
			context = &port->tx_data_list[port->tx_data_ix++];
			ret = mwifiex_usb_construct_send_urb(adapter, port, ep,
							     context, skb_send);
			if (ret == -1)
				mwifiex_write_data_complete(adapter, skb_send,
							    0, -1);
		}
	}

	if (f_send_cur_buf) {
		if (f_send_aggr_buf) {
			if (atomic_read(&port->tx_data_urb_pending) >=
			    MWIFIEX_TX_DATA_URB) {
				port->block_status = true;
				adapter->data_sent =
					mwifiex_usb_data_sent(adapter);
				/* no available urb, postcopy packet*/
				f_postcopy_cur_buf = 1;
				goto postcopy_cur_buf;
			}

			if (port->tx_data_ix >= MWIFIEX_TX_DATA_URB)
				port->tx_data_ix = 0;
		}

		/* prepend the single-packet aggregation interface header:
		 * bytes 0-1 = length, bytes 2-3 = type with "last" bit 0x80
		 */
		payload = skb->data;
		*(u16 *)&payload[2] =
			cpu_to_le16(MWIFIEX_TYPE_AGGR_DATA_V2 | 0x80);
		*(u16 *)payload = cpu_to_le16(skb->len);
		skb_send = skb;
		context = &port->tx_data_list[port->tx_data_ix++];
		return mwifiex_usb_construct_send_urb(adapter, port, ep,
						      context, skb_send);
	}

postcopy_cur_buf:
	if (f_postcopy_cur_buf) {
		skb_queue_tail(&port->tx_aggr.aggr_list, skb);
		port->tx_aggr.aggr_len += (skb->len + pad);
		port->tx_aggr.aggr_num++;
		/* New aggregation begin, start timer */
		if (!port->tx_aggr.timer_cnxt.is_hold_timer_set) {
			port->tx_aggr.timer_cnxt.hold_tmo_msecs =
				MWIFIEX_USB_TX_AGGR_TMO_MIN;
			timeout = port->tx_aggr.timer_cnxt.hold_tmo_msecs;
			mod_timer(&port->tx_aggr.timer_cnxt.hold_timer,
				  jiffies + msecs_to_jiffies(timeout));
			port->tx_aggr.timer_cnxt.is_hold_timer_set = true;
		}
	}

	return -EINPROGRESS;
}
/* Build one aggregate skb from all packets queued on the port's
 * aggregation list. Each packet is copied in with an aggregation
 * interface header and alignment padding; the source packets are
 * completed (status 0) as they are consumed. On success *skb_send
 * holds the aggregate buffer and 0 is returned; on allocation failure
 * all queued packets are failed back (-1) and -EBUSY is returned.
 */
static int mwifiex_usb_prepare_tx_aggr_skb(struct mwifiex_adapter *adapter,
					   struct usb_tx_data_port *port,
					   struct sk_buff **skb_send)
{
	struct sk_buff *skb_aggr, *skb_tmp;
	u8 *payload, pad;
	u16 align = adapter->bus_aggr.tx_aggr_align;
	struct mwifiex_txinfo *tx_info = NULL;
	bool is_txinfo_set = false;

	/* Packets in aggr_list will be send in either skb_aggr or
	 * write complete, delete the tx_aggr timer
	 */
	if (port->tx_aggr.timer_cnxt.is_hold_timer_set) {
		del_timer(&port->tx_aggr.timer_cnxt.hold_timer);
		port->tx_aggr.timer_cnxt.is_hold_timer_set = false;
		port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0;
	}

	skb_aggr = mwifiex_alloc_dma_align_buf(port->tx_aggr.aggr_len,
					       GFP_ATOMIC);
	if (!skb_aggr) {
		mwifiex_dbg(adapter, ERROR,
			    "%s: alloc skb_aggr failed\n", __func__);

		/* cannot aggregate: fail back everything that was queued */
		while ((skb_tmp = skb_dequeue(&port->tx_aggr.aggr_list)))
			mwifiex_write_data_complete(adapter, skb_tmp, 0, -1);

		port->tx_aggr.aggr_num = 0;
		port->tx_aggr.aggr_len = 0;
		return -EBUSY;
	}

	tx_info = MWIFIEX_SKB_TXCB(skb_aggr);
	memset(tx_info, 0, sizeof(*tx_info));

	while ((skb_tmp = skb_dequeue(&port->tx_aggr.aggr_list))) {
		/* padding for aligning next packet header*/
		pad = (align - (skb_tmp->len & (align - 1))) % align;
		payload = skb_put(skb_aggr, skb_tmp->len + pad);
		memcpy(payload, skb_tmp->data, skb_tmp->len);
		if (skb_queue_empty(&port->tx_aggr.aggr_list)) {
			/* do not padding for last packet*/
			*(u16 *)payload = cpu_to_le16(skb_tmp->len);
			*(u16 *)&payload[2] =
				cpu_to_le16(MWIFIEX_TYPE_AGGR_DATA_V2 | 0x80);
			skb_trim(skb_aggr, skb_aggr->len - pad);
		} else {
			/* add aggregation interface header */
			*(u16 *)payload = cpu_to_le16(skb_tmp->len + pad);
			*(u16 *)&payload[2] =
				cpu_to_le16(MWIFIEX_TYPE_AGGR_DATA_V2);
		}

		/* aggregate inherits BSS identity from the first packet */
		if (!is_txinfo_set) {
			tx_info->bss_num = MWIFIEX_SKB_TXCB(skb_tmp)->bss_num;
			tx_info->bss_type = MWIFIEX_SKB_TXCB(skb_tmp)->bss_type;
			is_txinfo_set = true;
		}

		port->tx_aggr.aggr_num--;
		port->tx_aggr.aggr_len -= (skb_tmp->len + pad);
		mwifiex_write_data_complete(adapter, skb_tmp, 0, 0);
	}

	tx_info->pkt_len = skb_aggr->len -
		(sizeof(struct txpd) + adapter->intf_hdr_len);
	tx_info->flags |= MWIFIEX_BUF_FLAG_AGGR_PKT;
	port->tx_aggr.aggr_num = 0;
	port->tx_aggr.aggr_len = 0;
	*skb_send = skb_aggr;

	return 0;
}
/*
 * This function sends a packet to device.
 *
 * It processes the packet to add the TxPD, checks condition and
 * sends the processed packet to firmware for transmission.
 *
 * On successful completion, the function calls the completion callback
 * and logs the time.
 *
 * Returns the host_to_card() result: 0 on synchronous completion,
 * -EINPROGRESS / -EBUSY / -ENOSR for deferred or flow-controlled
 * transmission, -1 on failure.
 */
int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
		       struct mwifiex_tx_param *tx_param)
{
	int ret = -1;
	struct mwifiex_adapter *adapter = priv->adapter;
	u8 *head_ptr;
	struct txpd *local_tx_pd = NULL;

	/* build the role-specific TxPD header in front of the payload */
	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
		head_ptr = mwifiex_process_uap_txpd(priv, skb);
	else
		head_ptr = mwifiex_process_sta_txpd(priv, skb);

	if (head_ptr) {
		/* local_tx_pd stays NULL unless the BSS role is STA */
		if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
			local_tx_pd =
				(struct txpd *) (head_ptr + INTF_HEADER_LEN);

		if (adapter->iface_type == MWIFIEX_USB) {
			adapter->data_sent = true;
			/* USB does not carry the interface header */
			skb_pull(skb, INTF_HEADER_LEN);
			ret = adapter->if_ops.host_to_card(adapter,
							   MWIFIEX_USB_EP_DATA,
							   skb, NULL);
		} else {
			ret = adapter->if_ops.host_to_card(adapter,
							   MWIFIEX_TYPE_DATA,
							   skb, tx_param);
		}
	}

	switch (ret) {
	case -ENOSR:
		dev_err(adapter->dev, "data: -ENOSR is returned\n");
		break;
	case -EBUSY:
		if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
		    (adapter->pps_uapsd_mode) && (adapter->tx_lock_flag)) {
			priv->adapter->tx_lock_flag = false;
			/* guarded: local_tx_pd may be NULL (non-STA role) */
			if (local_tx_pd)
				local_tx_pd->flags = 0;
		}
		dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
		break;
	case -1:
		/* PCIE keeps data_sent set; other interfaces clear it */
		if (adapter->iface_type != MWIFIEX_PCIE)
			adapter->data_sent = false;
		dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n",
			ret);
		adapter->dbg.num_tx_host_to_card_failure++;
		mwifiex_write_data_complete(adapter, skb, 0, ret);
		break;
	case -EINPROGRESS:
		if (adapter->iface_type != MWIFIEX_PCIE)
			adapter->data_sent = false;
		break;
	case 0:
		/* Packet consumed synchronously; complete it now */
		mwifiex_write_data_complete(adapter, skb, 0, ret);
		break;
	default:
		break;
	}

	return ret;
}