static void usb_rx_complete(struct urb *urb)
{
	struct if_usb_devdata *pipe_data = urb->context;
	struct usb_link_device *usb_ld =
			usb_get_intfdata(pipe_data->data_intf);
	struct io_device *iod;
	int iod_format = IPC_FMT;
	int ret;

	usb_mark_last_busy(urb->dev);

	switch (urb->status) {
	case 0:
	case -ENOENT:
		if (!urb->actual_length)
			goto re_submit;

		/* call iod recv */
		/* how can we distinguish the boot channel from the fmt
		 * channel?
		 */
		switch (pipe_data->format) {
		case IF_USB_FMT_EP:
			iod_format = IPC_FMT;
			pr_buffer("rx", (char *)urb->transfer_buffer,
					(size_t)urb->actual_length, 16);
			break;
		case IF_USB_RAW_EP:
			iod_format = IPC_MULTI_RAW;
			break;
		case IF_USB_RFS_EP:
			iod_format = IPC_RFS;
			break;
		default:
			break;
		}

		/* During the boot stage the fmt endpoint is shared with the
		 * boot io device.  If we used only the fmt device, at boot
		 * and during IPC exchange, this could be reduced to one
		 * device.
		 */
		if (iod_format == IPC_FMT &&
				usb_ld->ld.com_state == COM_BOOT)
			iod_format = IPC_BOOT;
		if (iod_format == IPC_FMT &&
				usb_ld->ld.com_state == COM_CRASH)
			iod_format = IPC_RAMDUMP;

		iod = link_get_iod_with_format(&usb_ld->ld, iod_format);
		if (iod) {
			ret = iod->recv(iod, &usb_ld->ld,
					(char *)urb->transfer_buffer,
					urb->actual_length);
			if (ret < 0)
				mif_err("io device recv error: %d\n", ret);
		}
re_submit:
		if (urb->status || atomic_read(&usb_ld->suspend_count))
			break;

		usb_mark_last_busy(urb->dev);
		usb_rx_submit(pipe_data, urb, GFP_ATOMIC);
		return;
	case -ESHUTDOWN:
	case -EPROTO:
		break;
	case -EOVERFLOW:
		mif_err("RX overflow\n");
		break;
	default:
		mif_err("RX complete status (%d)\n", urb->status);
		break;
	}

	usb_anchor_urb(urb, &pipe_data->urbs);
}
static ssize_t usblp_write(struct file *file, const char __user *buffer,
			   size_t count, loff_t *ppos)
{
	struct usblp *usblp = file->private_data;
	char *writebuf;
	struct urb *writeurb;
	int rv;
	int transfer_length;
	ssize_t writecount = 0;

	if (mutex_lock_interruptible(&usblp->wmut)) {
		rv = -EINTR;
		goto raise_biglock;
	}
	if ((rv = usblp_wwait(usblp, !!(file->f_flags & O_NONBLOCK))) < 0)
		goto raise_wait;

	while (writecount < count) {
		/*
		 * Step 1: Submit next block.
		 */
		if ((transfer_length = count - writecount) > USBLP_BUF_SIZE)
			transfer_length = USBLP_BUF_SIZE;

		rv = -ENOMEM;
		if ((writebuf = kmalloc(USBLP_BUF_SIZE, GFP_KERNEL)) == NULL)
			goto raise_buf;
		if ((writeurb = usb_alloc_urb(0, GFP_KERNEL)) == NULL)
			goto raise_urb;
		usb_fill_bulk_urb(writeurb, usblp->dev,
			usb_sndbulkpipe(usblp->dev,
			  usblp->protocol[usblp->current_protocol].epwrite->bEndpointAddress),
			writebuf, transfer_length, usblp_bulk_write, usblp);
		usb_anchor_urb(writeurb, &usblp->urbs);

		if (copy_from_user(writebuf, buffer + writecount,
				   transfer_length)) {
			rv = -EFAULT;
			goto raise_badaddr;
		}

		spin_lock_irq(&usblp->lock);
		usblp->wcomplete = 0;
		spin_unlock_irq(&usblp->lock);
		if ((rv = usb_submit_urb(writeurb, GFP_KERNEL)) < 0) {
			usblp->wstatus = 0;
			spin_lock_irq(&usblp->lock);
			usblp->no_paper = 0;
			usblp->wcomplete = 1;
			wake_up(&usblp->wwait);
			spin_unlock_irq(&usblp->lock);
			if (rv != -ENOMEM)
				rv = -EIO;
			goto raise_submit;
		}

		/*
		 * Step 2: Wait for transfer to end, collect results.
		 */
		rv = usblp_wwait(usblp, !!(file->f_flags & O_NONBLOCK));
		if (rv < 0) {
			if (rv == -EAGAIN) {
				/* Presume that it's going to complete well. */
				writecount += transfer_length;
			}
			if (rv == -ENOSPC) {
				spin_lock_irq(&usblp->lock);
				usblp->no_paper = 1;	/* Mark for poll(2) */
				spin_unlock_irq(&usblp->lock);
				writecount += transfer_length;
			}
			/* Leave URB dangling, to be cleaned on close. */
			goto collect_error;
		}

		if (usblp->wstatus < 0) {
			rv = -EIO;
			goto collect_error;
		}
		/*
		 * This is critical: it must be our URB, not another writer's.
		 * The wmut exists mainly to cover us here.
		 */
		writecount += usblp->wstatus;
	}

	mutex_unlock(&usblp->wmut);
	return writecount;

raise_submit:
raise_badaddr:
	usb_unanchor_urb(writeurb);
	usb_free_urb(writeurb);
raise_urb:
	kfree(writebuf);
raise_buf:
raise_wait:
collect_error:		/* Out of raise sequence */
	mutex_unlock(&usblp->wmut);
raise_biglock:
	return writecount ? writecount : rv;
}
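/*
 * The -EAGAIN/-ENOSPC branches above deliberately leave the write URB
 * anchored ("dangling") so a later point can reap it.  A minimal sketch
 * of that cleanup, assuming the same usblp->urbs anchor (hypothetical
 * helper name; usblp's real close path also has to release the write
 * buffers owned by those URBs):
 */
static void usblp_reap_writes(struct usblp *usblp)
{
	/* cancel anything still in flight and wait for the completions */
	usb_kill_anchored_urbs(&usblp->urbs);
}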
static void resp_avail_cb(struct urb *urb)
{
	struct usb_device *udev;
	struct ctrl_pkt_list_elem *list_elem = NULL;
	struct rmnet_ctrl_dev *dev = urb->context;
	void *cpkt;
	int status = 0;
	size_t cpkt_size = 0;
	unsigned int iface_num;

	udev = interface_to_usbdev(dev->intf);
	iface_num = dev->intf->cur_altsetting->desc.bInterfaceNumber;

	usb_autopm_put_interface_async(dev->intf);

	switch (urb->status) {
	case 0:
		/* success */
		dev->get_encap_resp_cnt++;
		break;

	/* do not resubmit */
	case -ESHUTDOWN:
	case -ENOENT:
	case -ECONNRESET:
	case -EPROTO:
		return;

	/* resubmit */
	case -EOVERFLOW:
		pr_err_ratelimited("%s: Babble error happened\n", __func__);
	default:
		pr_debug_ratelimited("%s: Non zero urb status = %d\n",
				__func__, urb->status);
		goto resubmit_int_urb;
	}

	dev_dbg(dev->devicep, "Read %d bytes for %s\n",
		urb->actual_length, dev->name);

	cpkt = urb->transfer_buffer;
	cpkt_size = urb->actual_length;
	if (!cpkt_size) {
		dev->zlp_cnt++;
		dev_dbg(dev->devicep, "%s: zero length pkt received\n",
				__func__);
		goto resubmit_int_urb;
	}

	list_elem = kmalloc(sizeof(struct ctrl_pkt_list_elem), GFP_ATOMIC);
	if (!list_elem) {
		dev_err(dev->devicep, "%s: list_elem alloc failed\n",
			__func__);
		return;
	}
	list_elem->cpkt.data = kmalloc(cpkt_size, GFP_ATOMIC);
	if (!list_elem->cpkt.data) {
		dev_err(dev->devicep, "%s: list_elem->data alloc failed\n",
			__func__);
		kfree(list_elem);
		return;
	}
	memcpy(list_elem->cpkt.data, cpkt, cpkt_size);
	list_elem->cpkt.data_size = cpkt_size;
	spin_lock(&dev->rx_lock);
	list_add_tail(&list_elem->list, &dev->rx_list);
	spin_unlock(&dev->rx_lock);

	rd_cb_time = cpu_clock(smp_processor_id());
	wake_up(&dev->read_wait_queue);

resubmit_int_urb:
	/* check if it is already submitted in resume */
	if (!dev->inturb->anchor) {
		usb_mark_last_busy(udev);
		usb_anchor_urb(dev->inturb, &dev->rx_submitted);
		status = usb_submit_urb(dev->inturb, GFP_ATOMIC);
		if (status) {
			usb_unanchor_urb(dev->inturb);
			dev_err(dev->devicep,
				"%s: Error re-submitting Int URB %d\n",
				__func__, status);
		}
		DBG_NOTI("[CHKRA:%d]>", iface_num);
	}
}
int diag_bridge_read(char *data, int size)
{
	struct urb *urb = NULL;
	unsigned int pipe;
	struct diag_bridge *dev = __dev;
	int ret;

	pr_debug("reading %d bytes", size);

	if (!dev || !dev->ifc) {
		pr_err("device is disconnected");
		return -ENODEV;
	}

	if (!dev->ops) {
		pr_err("bridge is not open");
		return -ENODEV;
	}

	if (!size) {
		dev_err(&dev->ifc->dev, "invalid size:%d\n", size);
		return -EINVAL;
	}

	/* if there was a previous unrecoverable error, just quit */
	if (dev->err)
		return -ENODEV;

	kref_get(&dev->kref);

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		dev_err(&dev->ifc->dev, "unable to allocate urb\n");
		ret = -ENOMEM;
		goto error;
	}

	ret = usb_autopm_get_interface(dev->ifc);
	if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
		pr_err_ratelimited("read: autopm_get failed:%d", ret);
		goto free_error;
	}

	pipe = usb_rcvbulkpipe(dev->udev, dev->in_epAddr);
	usb_fill_bulk_urb(urb, dev->udev, pipe, data, size,
			  diag_bridge_read_cb, dev);
	usb_anchor_urb(urb, &dev->submitted);
	dev->pending_reads++;

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		pr_err_ratelimited("submitting urb failed err:%d", ret);
		dev->pending_reads--;
		usb_unanchor_urb(urb);
	}
	usb_autopm_put_interface(dev->ifc);

free_error:
	usb_free_urb(urb);
error:
	if (ret)	/* otherwise this is done in the completion handler */
		kref_put(&dev->kref, diag_bridge_delete);
	return ret;
}
static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
				     struct net_device *netdev)
{
	struct gs_can *dev = netdev_priv(netdev);
	struct net_device_stats *stats = &dev->netdev->stats;
	struct urb *urb;
	struct gs_host_frame *hf;
	struct can_frame *cf;
	int rc;
	unsigned int idx;
	struct gs_tx_context *txc;

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	/* find an empty context to keep track of transmission */
	txc = gs_alloc_tx_context(dev);
	if (!txc)
		return NETDEV_TX_BUSY;

	/* create a URB, and a buffer for it */
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netdev_err(netdev, "No memory left for URB\n");
		goto nomem_urb;
	}

	hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC,
				&urb->transfer_dma);
	if (!hf) {
		netdev_err(netdev, "No memory left for USB buffer\n");
		goto nomem_hf;
	}

	idx = txc->echo_id;

	if (idx >= GS_MAX_TX_URBS) {
		netdev_err(netdev, "Invalid tx context %d\n", idx);
		goto badidx;
	}

	hf->echo_id = idx;
	hf->channel = dev->channel;

	cf = (struct can_frame *)skb->data;

	hf->can_id = cf->can_id;
	hf->can_dlc = cf->can_dlc;
	memcpy(hf->data, cf->data, cf->can_dlc);

	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT),
			  hf, sizeof(*hf),
			  gs_usb_xmit_callback, txc);

	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(urb, &dev->tx_submitted);

	can_put_echo_skb(skb, netdev, idx);

	atomic_inc(&dev->active_tx_urbs);

	rc = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(rc)) {			/* usb send failed */
		atomic_dec(&dev->active_tx_urbs);

		can_free_echo_skb(netdev, idx);
		gs_free_tx_context(txc);

		usb_unanchor_urb(urb);
		usb_free_coherent(dev->udev, sizeof(*hf), hf,
				  urb->transfer_dma);

		if (rc == -ENODEV) {
			netif_device_detach(netdev);
		} else {
			netdev_err(netdev, "usb_submit failed (err=%d)\n", rc);
			stats->tx_dropped++;
		}
	} else {
		/* Slow down tx path */
		if (atomic_read(&dev->active_tx_urbs) >= GS_MAX_TX_URBS)
			netif_stop_queue(netdev);
	}

	/* let usb core take care of this urb */
	usb_free_urb(urb);

	return NETDEV_TX_OK;

badidx:
	usb_free_coherent(dev->udev, sizeof(*hf), hf, urb->transfer_dma);
nomem_hf:
	usb_free_urb(urb);
nomem_urb:
	gs_free_tx_context(txc);
	dev_kfree_skb(skb);
	stats->tx_dropped++;

	return NETDEV_TX_OK;
}
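/*
 * Every TX URB submitted above is parked on dev->tx_submitted, so the
 * whole set can be cancelled in one call when the interface goes down.
 * A minimal sketch of that counterpart under the same field assumptions
 * (hypothetical helper; the real close path also resets the echo/tx
 * context bookkeeping):
 */
static void gs_can_cancel_tx(struct gs_can *dev)
{
	/* cancels all anchored URBs and waits for their completions */
	usb_kill_anchored_urbs(&dev->tx_submitted);
	atomic_set(&dev->active_tx_urbs, 0);
}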
static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
				    struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *rxdesc = skb->data;
	struct ieee80211_hdr *hdr;
	bool unicast = false;
	__le16 fc;
	struct ieee80211_rx_status rx_status = {0};
	struct rtl_stats stats = {
		.signal = 0,
		.rate = 0,
	};

	skb_pull(skb, RTL_RX_DESC_SIZE);
	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
	skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
	hdr = (struct ieee80211_hdr *)(skb->data);
	fc = hdr->frame_control;
	if (!stats.crc) {
		memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

		if (is_broadcast_ether_addr(hdr->addr1)) {
			/*TODO*/;
		} else if (is_multicast_ether_addr(hdr->addr1)) {
			/*TODO*/
		} else {
			unicast = true;
			rtlpriv->stats.rxbytesunicast += skb->len;
		}

		if (ieee80211_is_data(fc)) {
			rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

			if (unicast)
				rtlpriv->link_info.num_rx_inperiod++;
		}

		/* static bcn for roaming */
		rtl_beacon_statistic(hw, skb);
	}
}

static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
				      struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *rxdesc = skb->data;
	struct ieee80211_hdr *hdr;
	bool unicast = false;
	__le16 fc;
	struct ieee80211_rx_status rx_status = {0};
	struct rtl_stats stats = {
		.signal = 0,
		.rate = 0,
	};

	skb_pull(skb, RTL_RX_DESC_SIZE);
	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
	skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
	hdr = (struct ieee80211_hdr *)(skb->data);
	fc = hdr->frame_control;
	if (!stats.crc) {
		memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

		if (is_broadcast_ether_addr(hdr->addr1)) {
			/*TODO*/;
		} else if (is_multicast_ether_addr(hdr->addr1)) {
			/*TODO*/
		} else {
			unicast = true;
			rtlpriv->stats.rxbytesunicast += skb->len;
		}

		if (ieee80211_is_data(fc)) {
			rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

			if (unicast)
				rtlpriv->link_info.num_rx_inperiod++;
		}

		/* static bcn for roaming */
		rtl_beacon_statistic(hw, skb);

		if (likely(rtl_action_proc(hw, skb, false)))
			ieee80211_rx(hw, skb);
		else
			dev_kfree_skb_any(skb);
	}
}

static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct sk_buff *_skb;
	struct sk_buff_head rx_queue;
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	skb_queue_head_init(&rx_queue);
	if (rtlusb->usb_rx_segregate_hdl)
		rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue);
	WARN_ON(skb_queue_empty(&rx_queue));
	while (!skb_queue_empty(&rx_queue)) {
		_skb = skb_dequeue(&rx_queue);
		_rtl_usb_rx_process_agg(hw, _skb);
		ieee80211_rx(hw, _skb);
	}
}

#define __RX_SKB_MAX_QUEUED	64

static void _rtl_rx_work(unsigned long param)
{
	struct rtl_usb *rtlusb = (struct rtl_usb *)param;
	struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&rtlusb->rx_queue))) {
		if (unlikely(IS_USB_STOP(rtlusb))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		if (likely(!rtlusb->usb_rx_segregate_hdl)) {
			_rtl_usb_rx_process_noagg(hw, skb);
		} else {
			/* TO DO */
			_rtl_rx_pre_process(hw, skb);
			pr_err("rx agg not supported\n");
		}
	}
}

static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
					unsigned int len)
{
#if NET_IP_ALIGN != 0
	unsigned int padding = 0;
#endif

	/* make function no-op when possible */
	if (NET_IP_ALIGN == 0 || len < sizeof(*hdr))
		return 0;

#if NET_IP_ALIGN != 0
	/* alignment calculation as in lbtf_rx() / carl9170_rx_copy_data() */
	/* TODO: deduplicate common code, define helper function instead? */

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);

		padding ^= NET_IP_ALIGN;

		/* Input might be invalid, avoid accessing memory outside
		 * the buffer.
		 */
		if ((unsigned long)qc - (unsigned long)hdr < len &&
		    *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			padding ^= NET_IP_ALIGN;
	}

	if (ieee80211_has_a4(hdr->frame_control))
		padding ^= NET_IP_ALIGN;

	return padding;
#endif
}

#define __RADIO_TAP_SIZE_RSV	32

static void _rtl_rx_completed(struct urb *_urb)
{
	struct rtl_usb *rtlusb = (struct rtl_usb *)_urb->context;
	struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int err = 0;

	if (unlikely(IS_USB_STOP(rtlusb)))
		goto free;

	if (likely(0 == _urb->status)) {
		unsigned int padding;
		struct sk_buff *skb;
		unsigned int qlen;
		unsigned int size = _urb->actual_length;
		struct ieee80211_hdr *hdr;

		if (size < RTL_RX_DESC_SIZE + sizeof(struct ieee80211_hdr)) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Too short packet from bulk IN! (len: %d)\n",
				 size);
			goto resubmit;
		}

		qlen = skb_queue_len(&rtlusb->rx_queue);
		if (qlen >= __RX_SKB_MAX_QUEUED) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Pending RX skbuff queue full! (qlen: %d)\n",
				 qlen);
			goto resubmit;
		}

		hdr = (void *)(_urb->transfer_buffer + RTL_RX_DESC_SIZE);
		padding = _rtl_rx_get_padding(hdr, size - RTL_RX_DESC_SIZE);

		skb = dev_alloc_skb(size + __RADIO_TAP_SIZE_RSV + padding);
		if (!skb) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Can't allocate skb for bulk IN!\n");
			goto resubmit;
		}

		_rtl_install_trx_info(rtlusb, skb, rtlusb->in_ep);

		/* Make sure the payload data is 4 byte aligned. */
		skb_reserve(skb, padding);

		/* reserve some space for mac80211's radiotap */
		skb_reserve(skb, __RADIO_TAP_SIZE_RSV);

		memcpy(skb_put(skb, size), _urb->transfer_buffer, size);

		skb_queue_tail(&rtlusb->rx_queue, skb);
		tasklet_schedule(&rtlusb->rx_work_tasklet);

		goto resubmit;
	}

	switch (_urb->status) {
	/* disconnect */
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		goto free;
	default:
		break;
	}

resubmit:
	usb_anchor_urb(_urb, &rtlusb->rx_submitted);
	err = usb_submit_urb(_urb, GFP_ATOMIC);
	if (unlikely(err)) {
		usb_unanchor_urb(_urb);
		goto free;
	}
	return;

free:
	/* On some architectures, usb_free_coherent must not be called from
	 * hardirq context.  Queue urb to cleanup list.
	 */
	usb_anchor_urb(_urb, &rtlusb->rx_cleanup_urbs);
}

#undef __RADIO_TAP_SIZE_RSV

static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
	struct urb *urb;

	usb_kill_anchored_urbs(&rtlusb->rx_submitted);

	tasklet_kill(&rtlusb->rx_work_tasklet);
	cancel_work_sync(&rtlpriv->works.lps_change_work);

	flush_workqueue(rtlpriv->works.rtl_wq);
	destroy_workqueue(rtlpriv->works.rtl_wq);

	skb_queue_purge(&rtlusb->rx_queue);

	while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
		usb_free_coherent(urb->dev, urb->transfer_buffer_length,
				  urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
	}
}

static int _rtl_usb_receive(struct ieee80211_hw *hw)
{
	struct urb *urb;
	int err;
	int i;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	WARN_ON(0 == rtlusb->rx_urb_num);
	/* 1600 == 1514 + max WLAN header + rtk info */
	WARN_ON(rtlusb->rx_max_size < 1600);

	for (i = 0; i < rtlusb->rx_urb_num; i++) {
		err = -ENOMEM;
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Failed to alloc URB!!\n");
			goto err_out;
		}

		err = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
		if (err < 0) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Failed to prep_rx_urb!!\n");
			usb_free_urb(urb);
			goto err_out;
		}

		usb_anchor_urb(urb, &rtlusb->rx_submitted);
		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err)
			goto err_out;
		usb_free_urb(urb);
	}
	return 0;

err_out:
	usb_kill_anchored_urbs(&rtlusb->rx_submitted);
	_rtl_usb_cleanup_rx(hw);
	return err;
}

static int rtl_usb_start(struct ieee80211_hw *hw)
{
	int err;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	err = rtlpriv->cfg->ops->hw_init(hw);
	if (!err) {
		rtl_init_rx_config(hw);

		/* Enable software */
		SET_USB_START(rtlusb);

		/* should after adapter start and interrupt enable. */
		set_hal_start(rtlhal);

		/* Start bulk IN */
		err = _rtl_usb_receive(hw);
	}

	return err;
}
static int btusb_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	struct btusb_data *data = hdev->driver_data;
	struct usb_ctrlrequest *dr;
	struct urb *urb;
	unsigned int pipe;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RUNNING, &hdev->flags))
		return -EBUSY;

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb)
			return -ENOMEM;

		dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
		if (!dr) {
			usb_free_urb(urb);
			return -ENOMEM;
		}

		dr->bRequestType = data->cmdreq_type;
		dr->bRequest     = 0;
		dr->wIndex       = 0;
		dr->wValue       = 0;
		dr->wLength      = __cpu_to_le16(skb->len);

		pipe = usb_sndctrlpipe(data->udev, 0x00);

		usb_fill_control_urb(urb, data->udev, pipe, (void *) dr,
				skb->data, skb->len, btusb_tx_complete, skb);

		hdev->stat.cmd_tx++;
		break;

	case HCI_ACLDATA_PKT:
		if (!data->bulk_tx_ep || hdev->conn_hash.acl_num < 1)
			return -ENODEV;

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb)
			return -ENOMEM;

		pipe = usb_sndbulkpipe(data->udev,
					data->bulk_tx_ep->bEndpointAddress);

		usb_fill_bulk_urb(urb, data->udev, pipe,
				skb->data, skb->len, btusb_tx_complete, skb);

		hdev->stat.acl_tx++;
		break;

	case HCI_SCODATA_PKT:
		if (!data->isoc_tx_ep || hdev->conn_hash.sco_num < 1)
			return -ENODEV;

		urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_ATOMIC);
		if (!urb)
			return -ENOMEM;

		pipe = usb_sndisocpipe(data->udev,
					data->isoc_tx_ep->bEndpointAddress);

		urb->dev      = data->udev;
		urb->pipe     = pipe;
		urb->context  = skb;
		urb->complete = btusb_isoc_tx_complete;
		urb->interval = data->isoc_tx_ep->bInterval;

		urb->transfer_flags  = URB_ISO_ASAP;
		urb->transfer_buffer = skb->data;
		urb->transfer_buffer_length = skb->len;

		__fill_isoc_descriptor(urb, skb->len,
				le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize));

		hdev->stat.sco_tx++;
		goto skip_waking;

	default:
		return -EILSEQ;
	}

	err = inc_tx(data);
	if (err) {
		usb_anchor_urb(urb, &data->deferred);
		schedule_work(&data->waker);
		err = 0;
		goto done;
	}

skip_waking:
	usb_anchor_urb(urb, &data->tx_anchor);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0) {
		BT_ERR("%s urb %p submission failed", hdev->name, urb);
		kfree(urb->setup_packet);
		usb_unanchor_urb(urb);
	} else {
		usb_mark_last_busy(data->udev);
	}

	usb_free_urb(urb);

done:
	return err;
}
static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ems_usb *dev = netdev_priv(netdev);
	struct ems_tx_urb_context *context = NULL;
	struct net_device_stats *stats = &netdev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	struct ems_cpc_msg *msg;
	struct urb *urb;
	u8 *buf;
	int i, err;
	size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN
			+ sizeof(struct cpc_can_msg);

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	/* create a URB, and a buffer for it, and copy the data to the URB */
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netdev_err(netdev, "No memory left for URBs\n");
		goto nomem;
	}

	buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC,
				 &urb->transfer_dma);
	if (!buf) {
		netdev_err(netdev, "No memory left for USB buffer\n");
		usb_free_urb(urb);
		goto nomem;
	}

	msg = (struct ems_cpc_msg *)&buf[CPC_HEADER_SIZE];

	msg->msg.can_msg.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK);
	msg->msg.can_msg.length = cf->can_dlc;

	if (cf->can_id & CAN_RTR_FLAG) {
		msg->type = cf->can_id & CAN_EFF_FLAG ?
			CPC_CMD_TYPE_EXT_RTR_FRAME : CPC_CMD_TYPE_RTR_FRAME;

		msg->length = CPC_CAN_MSG_MIN_SIZE;
	} else {
		msg->type = cf->can_id & CAN_EFF_FLAG ?
			CPC_CMD_TYPE_EXT_CAN_FRAME : CPC_CMD_TYPE_CAN_FRAME;

		for (i = 0; i < cf->can_dlc; i++)
			msg->msg.can_msg.msg[i] = cf->data[i];

		msg->length = CPC_CAN_MSG_MIN_SIZE + cf->can_dlc;
	}

	for (i = 0; i < MAX_TX_URBS; i++) {
		if (dev->tx_contexts[i].echo_index == MAX_TX_URBS) {
			context = &dev->tx_contexts[i];
			break;
		}
	}

	/*
	 * This should never happen: it would mean we had more URBs in
	 * flight than allowed (MAX_TX_URBS).
	 */
	if (!context) {
		usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
		usb_free_urb(urb);

		netdev_warn(netdev, "couldn't find free context\n");

		return NETDEV_TX_BUSY;
	}

	context->dev = dev;
	context->echo_index = i;
	context->dlc = cf->can_dlc;

	usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
			  size, ems_usb_write_bulk_callback, context);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(urb, &dev->tx_submitted);

	can_put_echo_skb(skb, netdev, context->echo_index);

	atomic_inc(&dev->active_tx_urbs);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(err)) {
		can_free_echo_skb(netdev, context->echo_index);

		usb_unanchor_urb(urb);
		usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
		dev_kfree_skb(skb);

		atomic_dec(&dev->active_tx_urbs);

		if (err == -ENODEV) {
			netif_device_detach(netdev);
		} else {
			netdev_warn(netdev, "failed tx_urb %d\n", err);
			stats->tx_dropped++;
		}
	} else {
		netif_trans_update(netdev);

		/* Slow down tx path */
		if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
		    dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
			netif_stop_queue(netdev);
		}
	}

	/*
	 * Release our reference to this URB, the USB core will eventually
	 * free it entirely.
	 */
	usb_free_urb(urb);

	return NETDEV_TX_OK;

nomem:
	dev_kfree_skb(skb);
	stats->tx_dropped++;

	return NETDEV_TX_OK;
}
static ssize_t skel_write(struct file *file, const char *user_buffer,
			  size_t count, loff_t *ppos)
{
	struct usb_skel *dev;
	int retval = 0;
	struct urb *urb = NULL;
	char *buf = NULL;
	size_t writesize = min(count, (size_t)MAX_TRANSFER);

	dev = file->private_data;

	if (count == 0)
		goto exit;

	if (!(file->f_flags & O_NONBLOCK)) {
		if (down_interruptible(&dev->limit_sem)) {
			retval = -ERESTARTSYS;
			goto exit;
		}
	} else {
		if (down_trylock(&dev->limit_sem)) {
			retval = -EAGAIN;
			goto exit;
		}
	}

	spin_lock_irq(&dev->err_lock);
	retval = dev->errors;
	if (retval < 0) {
		dev->errors = 0;
		retval = (retval == -EPIPE) ? retval : -EIO;
	}
	spin_unlock_irq(&dev->err_lock);
	if (retval < 0)
		goto error;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		retval = -ENOMEM;
		goto error;
	}

	buf = usb_alloc_coherent(dev->udev, writesize, GFP_KERNEL,
				 &urb->transfer_dma);
	if (!buf) {
		retval = -ENOMEM;
		goto error;
	}

	if (copy_from_user(buf, user_buffer, writesize)) {
		retval = -EFAULT;
		goto error;
	}

	mutex_lock(&dev->io_mutex);
	if (!dev->interface) {
		mutex_unlock(&dev->io_mutex);
		retval = -ENODEV;
		goto error;
	}

	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr),
			  buf, writesize, skel_write_bulk_callback, dev);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(urb, &dev->submitted);

	retval = usb_submit_urb(urb, GFP_KERNEL);
	mutex_unlock(&dev->io_mutex);
	if (retval) {
		err("%s - failed submitting write urb, error %d",
		    __func__, retval);
		goto error_unanchor;
	}

	usb_free_urb(urb);

	return writesize;

error_unanchor:
	usb_unanchor_urb(urb);
error:
	if (urb) {
		usb_free_coherent(dev->udev, writesize, buf,
				  urb->transfer_dma);
		usb_free_urb(urb);
	}
	up(&dev->limit_sem);
exit:
	return retval;
}
int diag_bridge_write(int id, char *data, int size)
{
	struct urb *urb = NULL;
	unsigned int pipe;
	struct diag_bridge *dev;
	int ret;

	if (id < 0 || id >= MAX_DIAG_BRIDGE_DEVS) {
		pr_err("Invalid device ID");
		return -ENODEV;
	}

	pr_debug("writing %d bytes", size);

	dev = __dev[id];
	if (!dev) {
		pr_err("device is disconnected");
		return -ENODEV;
	}

	mutex_lock(&dev->ifc_mutex);
	if (!dev->ifc) {
		ret = -ENODEV;
		goto error;
	}

	if (!dev->ops) {
		pr_err("bridge is not open");
		ret = -ENODEV;
		goto error;
	}

	if (!size) {
		dev_err(&dev->ifc->dev, "invalid size:%d\n", size);
		ret = -EINVAL;
		goto error;
	}

	/* if there was a previous unrecoverable error, just quit */
	if (dev->err) {
		ret = -ENODEV;
		goto error;
	}

	kref_get(&dev->kref);

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		dev_err(&dev->ifc->dev, "unable to allocate urb\n");
		ret = -ENOMEM;
		goto put_error;
	}

	ret = usb_autopm_get_interface(dev->ifc);
	if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
		pr_err_ratelimited("write: autopm_get failed:%d", ret);
		goto free_error;
	}

	pipe = usb_sndbulkpipe(dev->udev, dev->out_epAddr);
	usb_fill_bulk_urb(urb, dev->udev, pipe, data, size,
			  diag_bridge_write_cb, dev);
	urb->transfer_flags |= URB_ZERO_PACKET;
	usb_anchor_urb(urb, &dev->submitted);
	dev->pending_writes++;

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		pr_err_ratelimited("submitting urb failed err:%d", ret);
		dev->pending_writes--;
		usb_unanchor_urb(urb);
		usb_autopm_put_interface(dev->ifc);
		goto free_error;
	}

free_error:
	usb_free_urb(urb);
put_error:
	if (ret)	/* otherwise this is done in the completion handler */
		kref_put(&dev->kref, diag_bridge_delete);
error:
	mutex_unlock(&dev->ifc_mutex);
	return ret;
}
/*
 * Start interface
 */
static int ems_usb_start(struct ems_usb *dev)
{
	struct net_device *netdev = dev->netdev;
	int err, i;

	dev->intr_in_buffer[0] = 0;
	dev->free_slots = 50; /* initial size */

	for (i = 0; i < MAX_RX_URBS; i++) {
		struct urb *urb = NULL;
		u8 *buf = NULL;

		/* create a URB, and a buffer for it */
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			netdev_err(netdev, "No memory left for URBs\n");
			err = -ENOMEM;
			break;
		}

		buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
					 &urb->transfer_dma);
		if (!buf) {
			netdev_err(netdev, "No memory left for USB buffer\n");
			usb_free_urb(urb);
			err = -ENOMEM;
			break;
		}

		usb_fill_bulk_urb(urb, dev->udev,
				  usb_rcvbulkpipe(dev->udev, 2),
				  buf, RX_BUFFER_SIZE,
				  ems_usb_read_bulk_callback, dev);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		usb_anchor_urb(urb, &dev->rx_submitted);

		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err) {
			usb_unanchor_urb(urb);
			usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
					  urb->transfer_dma);
			usb_free_urb(urb);
			break;
		}

		/* Drop reference, USB core will take care of freeing it */
		usb_free_urb(urb);
	}

	/* Did we submit any URBs? */
	if (i == 0) {
		netdev_warn(netdev, "couldn't setup read URBs\n");
		return err;
	}

	/* Warn if we couldn't submit all the URBs */
	if (i < MAX_RX_URBS)
		netdev_warn(netdev, "rx performance may be slow\n");

	/* Setup and start interrupt URB */
	usb_fill_int_urb(dev->intr_urb, dev->udev,
			 usb_rcvintpipe(dev->udev, 1),
			 dev->intr_in_buffer,
			 INTR_IN_BUFFER_SIZE,
			 ems_usb_read_interrupt_callback, dev, 1);

	err = usb_submit_urb(dev->intr_urb, GFP_KERNEL);
	if (err) {
		netdev_warn(netdev, "intr URB submit failed: %d\n", err);
		return err;
	}

	/* CPC-USB will transfer received message to host */
	err = ems_usb_control_cmd(dev, CONTR_CAN_MESSAGE | CONTR_CONT_ON);
	if (err)
		goto failed;

	/* CPC-USB will transfer CAN state changes to host */
	err = ems_usb_control_cmd(dev, CONTR_CAN_STATE | CONTR_CONT_ON);
	if (err)
		goto failed;

	/* CPC-USB will transfer bus errors to host */
	err = ems_usb_control_cmd(dev, CONTR_BUS_ERROR | CONTR_CONT_ON);
	if (err)
		goto failed;

	err = ems_usb_write_mode(dev, SJA1000_MOD_NORMAL);
	if (err)
		goto failed;

	dev->can.state = CAN_STATE_ERROR_ACTIVE;

	return 0;

failed:
	netdev_warn(netdev, "couldn't submit control: %d\n", err);

	return err;
}
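/*
 * The inverse of ems_usb_start(): everything anchored on
 * dev->rx_submitted plus the interrupt URB must be taken down again on
 * close or disconnect.  A minimal sketch under the same field
 * assumptions (hypothetical helper; the real driver also clears its
 * tx contexts here):
 */
static void ems_usb_unlink_all_urbs(struct ems_usb *dev)
{
	usb_kill_anchored_urbs(&dev->rx_submitted);
	usb_kill_urb(dev->intr_urb);
}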
static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd,
			       unsigned int plen, void *payload,
			       unsigned int outlen, void *out)
{
	struct ar9170_usb *aru = (void *) ar;
	struct urb *urb = NULL;
	unsigned long flags;
	int err = -ENOMEM;

	if (unlikely(!IS_ACCEPTING_CMD(ar)))
		return -EPERM;

	if (WARN_ON(plen > AR9170_MAX_CMD_LEN - 4))
		return -EINVAL;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (unlikely(!urb))
		goto err_free;

	ar->cmdbuf[0] = cpu_to_le32(plen);
	ar->cmdbuf[0] |= cpu_to_le32(cmd << 8);
	/* writing multiple regs fills this buffer already */
	if (plen && payload != (u8 *)(&ar->cmdbuf[1]))
		memcpy(&ar->cmdbuf[1], payload, plen);

	spin_lock_irqsave(&aru->common.cmdlock, flags);
	aru->readbuf = (u8 *)out;
	aru->readlen = outlen;
	spin_unlock_irqrestore(&aru->common.cmdlock, flags);

	usb_fill_int_urb(urb, aru->udev,
			 usb_sndintpipe(aru->udev, AR9170_EP_CMD),
			 aru->common.cmdbuf, plen + 4,
			 ar9170_usb_tx_urb_complete, NULL, 1);

	usb_anchor_urb(urb, &aru->tx_submitted);
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(err)) {
		usb_unanchor_urb(urb);
		usb_free_urb(urb);
		goto err_unbuf;
	}
	usb_free_urb(urb);

	err = wait_for_completion_timeout(&aru->cmd_wait, HZ);
	if (err == 0) {
		err = -ETIMEDOUT;
		goto err_unbuf;
	}

	if (aru->readlen != outlen) {
		err = -EMSGSIZE;
		goto err_unbuf;
	}

	return 0;

err_unbuf:
	/* Maybe the device was removed in the second we were waiting? */
	if (IS_STARTED(ar)) {
		dev_err(&aru->udev->dev, "no command feedback "
					 "received (%d).\n", err);

		/* provide some maybe useful debug information */
		print_hex_dump_bytes("ar9170 cmd: ", DUMP_PREFIX_NONE,
				     aru->common.cmdbuf, plen + 4);
		dump_stack();
	}

	/* invalidate to avoid completing the next prematurely */
	spin_lock_irqsave(&aru->common.cmdlock, flags);
	aru->readbuf = NULL;
	aru->readlen = 0;
	spin_unlock_irqrestore(&aru->common.cmdlock, flags);

err_free:
	return err;
}
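/*
 * The wait_for_completion_timeout() above is paired with a completion
 * on the response path: whichever handler receives the command reply
 * copies it into aru->readbuf and signals aru->cmd_wait.  A
 * hypothetical sketch of that signalling step, using only the fields
 * seen above (the real driver does this from its USB-in completion
 * handling):
 */
static void ar9170_usb_signal_cmd_response(struct ar9170_usb *aru,
					   void *resp, unsigned int len)
{
	unsigned long flags;

	spin_lock_irqsave(&aru->common.cmdlock, flags);
	if (aru->readbuf && len <= aru->readlen)
		memcpy(aru->readbuf, resp, len);
	/* exec_cmd compares readlen against outlen after waking up */
	aru->readlen = len;
	spin_unlock_irqrestore(&aru->common.cmdlock, flags);

	complete(&aru->cmd_wait);
}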
static int __devinit if_usb_probe(struct usb_interface *intf,
					const struct usb_device_id *id)
{
	struct usb_host_interface *data_desc;
	struct usb_link_device *usb_ld =
			(struct usb_link_device *)id->driver_info;
	struct link_device *ld = &usb_ld->ld;
	struct usb_interface *data_intf;
	struct usb_device *usbdev = interface_to_usbdev(intf);
	struct device *dev, *ehci_dev, *root_hub;
	struct if_usb_devdata *pipe;
	struct urb *urb;
	int i;
	int j;
	int dev_id;
	int err;

	/* To detect the order in which usb devices are probed */
	dev_id = intf->cur_altsetting->desc.bInterfaceNumber;
	if (dev_id >= IF_USB_DEVNUM_MAX) {
		dev_err(&intf->dev, "Device id %d cannot be supported\n",
			dev_id);
		return -EINVAL;
	}

	if (!usb_ld) {
		dev_err(&intf->dev, "if_usb device is not allocated\n");
		err = -ENOMEM;
		goto out;
	}

	mif_info("probe dev_id=%d usb_device_id(0x%p), usb_ld (0x%p)\n",
				dev_id, id, usb_ld);

	usb_ld->usbdev = usbdev;
	usb_get_dev(usbdev);

	for (i = 0; i < IF_USB_DEVNUM_MAX; i++) {
		data_intf = usb_ifnum_to_if(usbdev, i);

		/* remap endpoint of RAW to no.1 for LTE modem */
		if (i == 0)
			pipe = &usb_ld->devdata[1];
		else if (i == 1)
			pipe = &usb_ld->devdata[0];
		else
			pipe = &usb_ld->devdata[i];

		pipe->disconnected = 0;
		pipe->data_intf = data_intf;
		data_desc = data_intf->cur_altsetting;

		/* Endpoints */
		if (usb_pipein(data_desc->endpoint[0].desc.bEndpointAddress)) {
			pipe->rx_pipe = usb_rcvbulkpipe(usbdev,
				data_desc->endpoint[0].desc.bEndpointAddress);
			pipe->tx_pipe = usb_sndbulkpipe(usbdev,
				data_desc->endpoint[1].desc.bEndpointAddress);
			pipe->rx_buf_size = 1024*4;
		} else {
			pipe->rx_pipe = usb_rcvbulkpipe(usbdev,
				data_desc->endpoint[1].desc.bEndpointAddress);
			pipe->tx_pipe = usb_sndbulkpipe(usbdev,
				data_desc->endpoint[0].desc.bEndpointAddress);
			pipe->rx_buf_size = 1024*4;
		}

		if (i == 0) {
			dev_info(&usbdev->dev, "USB IF USB device found\n");
		} else {
			err = usb_driver_claim_interface(&if_usb_driver,
					data_intf, usb_ld);
			if (err < 0) {
				mif_err("failed to claim usb interface\n");
				goto out;
			}
		}

		usb_set_intfdata(data_intf, usb_ld);
		usb_ld->dev_count++;
		pm_suspend_ignore_children(&data_intf->dev, true);

		for (j = 0; j < URB_COUNT; j++) {
			urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!urb) {
				mif_err("alloc urb fail\n");
				err = -ENOMEM;
				goto out2;
			}

			urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
			urb->transfer_buffer = usb_alloc_coherent(usbdev,
				pipe->rx_buf_size, GFP_KERNEL,
				&urb->transfer_dma);
			if (!urb->transfer_buffer) {
				mif_err("Failed to allocate transfer buffer\n");
				usb_free_urb(urb);
				err = -ENOMEM;
				goto out2;
			}

			usb_fill_bulk_urb(urb, usbdev, pipe->rx_pipe,
				urb->transfer_buffer, pipe->rx_buf_size,
				usb_rx_complete, pipe);
			usb_anchor_urb(urb, &pipe->urbs);
		}
	}

	/* temporary call reset_resume */
	atomic_set(&usb_ld->suspend_count, 1);
	if_usb_reset_resume(data_intf);
	atomic_set(&usb_ld->suspend_count, 0);

	SET_HOST_ACTIVE(usb_ld->pdata, 1);
	usb_ld->host_wake_timeout_flag = 0;

	if (gpio_get_value(usb_ld->pdata->gpio_phone_active) &&
			usb_modem_state) {
		struct link_pm_data *pm_data = usb_ld->link_pm_data;
		int delay = usb_ld->link_pm_data->autosuspend_delay_ms ?:
				DEFAULT_AUTOSUSPEND_DELAY_MS;
		pm_runtime_set_autosuspend_delay(&usbdev->dev, delay);
		dev = &usbdev->dev;
		if (dev->parent) {
			dev_dbg(&usbdev->dev, "if_usb Runtime PM Start!!\n");
			usb_enable_autosuspend(usb_ld->usbdev);
			/* s5p-ehci runtime pm allow - usb phy suspend mode */
			root_hub = &usbdev->bus->root_hub->dev;
			ehci_dev = root_hub->parent;
			mif_debug("ehci device = %s, %s\n",
					dev_driver_string(ehci_dev),
					dev_name(ehci_dev));
			pm_runtime_allow(ehci_dev);

			if (pm_data->block_autosuspend)
				pm_runtime_forbid(dev);

			if (has_hub(usb_ld)) {
				usb_ld->link_pm_data->hub_status =
					(usb_ld->link_pm_data->root_hub) ?
					HUB_STATE_PREACTIVE : HUB_STATE_ACTIVE;
			}

			usb_ld->link_pm_data->root_hub = root_hub;
		}

		usb_ld->flow_suspend = 0;
		/* Queue work if skbs were pending before a disconnect/probe */
		if (ld->sk_fmt_tx_q.qlen || ld->sk_raw_tx_q.qlen)
			queue_delayed_work(ld->tx_wq, &ld->tx_delayed_work, 0);

		usb_ld->if_usb_connected = 1;
		/*USB3503*/
		mif_debug("hub active complete\n");

		usb_change_modem_state(usb_ld, STATE_ONLINE);
		usb_modem_state = 0;
	} else {
static int usb_tx_urb_with_skb(struct usb_link_device *usb_ld,
			       struct sk_buff *skb,
			       struct if_usb_devdata *pipe_data)
{
	int ret, cnt = 0;
	struct urb *urb;
	struct usb_device *usbdev = usb_ld->usbdev;
	unsigned long flags;

	if (!usbdev || (usbdev->state == USB_STATE_NOTATTACHED) ||
			usb_ld->host_wake_timeout_flag)
		return -ENODEV;

	pm_runtime_get_noresume(&usbdev->dev);

	if (usbdev->dev.power.runtime_status == RPM_SUSPENDED ||
		usbdev->dev.power.runtime_status == RPM_SUSPENDING) {
		usb_ld->resume_status = AP_INITIATED_RESUME;
		SET_SLAVE_WAKEUP(usb_ld->pdata, 1);

		while (!wait_event_interruptible_timeout(usb_ld->l2_wait,
				usbdev->dev.power.runtime_status == RPM_ACTIVE
				|| pipe_data->disconnected,
				HOST_WAKEUP_TIMEOUT_JIFFIES)) {
			if (cnt == MAX_RETRY) {
				mif_err("host wakeup timeout !!\n");
				SET_SLAVE_WAKEUP(usb_ld->pdata, 0);
				pm_runtime_put_autosuspend(&usbdev->dev);
				schedule_work(&usb_ld->disconnect_work);
				usb_ld->host_wake_timeout_flag = 1;
				return -1;
			}
			mif_err("host wakeup timeout ! retry..\n");
			SET_SLAVE_WAKEUP(usb_ld->pdata, 0);
			udelay(100);
			SET_SLAVE_WAKEUP(usb_ld->pdata, 1);
			cnt++;
		}

		if (pipe_data->disconnected) {
			SET_SLAVE_WAKEUP(usb_ld->pdata, 0);
			pm_runtime_put_autosuspend(&usbdev->dev);
			return -ENODEV;
		}

		mif_debug("wait_q done (runtime_status=%d)\n",
				usbdev->dev.power.runtime_status);
	}

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		mif_err("alloc urb error\n");
		if (pm_runtime_put_autosuspend(&usbdev->dev) < 0)
			mif_debug("pm_runtime_put_autosuspend fail\n");
		return -ENOMEM;
	}

	urb->transfer_flags = URB_ZERO_PACKET;
	usb_fill_bulk_urb(urb, usbdev, pipe_data->tx_pipe, skb->data,
			skb->len, usb_tx_complete, (void *)skb);

	spin_lock_irqsave(&usb_ld->lock, flags);
	if (atomic_read(&usb_ld->suspend_count)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &usb_ld->deferred);
		usb_put_urb(urb);
		mif_debug("anchor urb (0x%p)\n", urb);
		spin_unlock_irqrestore(&usb_ld->lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&usb_ld->lock, flags);

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret < 0) {
		mif_err("usb_submit_urb with ret(%d)\n", ret);
		if (pm_runtime_put_autosuspend(&usbdev->dev) < 0)
			mif_debug("pm_runtime_put_autosuspend fail\n");
	}
	return ret;
}
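/*
 * URBs parked on usb_ld->deferred while suspended have to be pushed out
 * again once the device resumes.  A minimal sketch of that resume-side
 * loop, assuming the anchor usage above (hypothetical helper; error
 * handling for the skb attached to each urb is elided):
 */
static void usb_ld_resubmit_deferred(struct usb_link_device *usb_ld)
{
	struct urb *urb;

	/* usb_get_from_anchor() unanchors the urb and returns it with a
	 * reference held, so drop that reference after submitting */
	while ((urb = usb_get_from_anchor(&usb_ld->deferred))) {
		if (usb_submit_urb(urb, GFP_KERNEL) < 0)
			mif_err("deferred urb submit failed\n");
		usb_free_urb(urb);
	}
}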
int data_bridge_write(unsigned int id, struct sk_buff *skb)
{
	int result;
	int size = skb->len;
	int pending;
	struct urb *txurb;
	struct timestamp_info *info = (struct timestamp_info *)skb->cb;
	struct data_bridge *dev = __dev[id];
	struct bridge *brdg;

	if (!dev || !dev->brdg || dev->err || !usb_get_intfdata(dev->intf))
		return -ENODEV;

	brdg = dev->brdg;
	if (!brdg)
		return -ENODEV;

	dev_dbg(&dev->intf->dev, "%s: write (%d bytes)\n", __func__, skb->len);

	result = usb_autopm_get_interface(dev->intf);
	if (result < 0) {
		dev_dbg(&dev->intf->dev, "%s: resume failure\n", __func__);
		goto pm_error;
	}

	txurb = usb_alloc_urb(0, GFP_KERNEL);
	if (!txurb) {
		dev_err(&dev->intf->dev, "%s: error allocating tx urb\n",
			__func__);
		result = -ENOMEM;
		goto error;
	}

	/* store dev pointer in skb */
	info->dev = dev;
	info->tx_queued = get_timestamp();

	usb_fill_bulk_urb(txurb, dev->udev, dev->bulk_out,
			skb->data, skb->len, data_bridge_write_cb, skb);
	txurb->transfer_flags |= URB_ZERO_PACKET;

	pending = atomic_inc_return(&dev->pending_txurbs);
	usb_anchor_urb(txurb, &dev->tx_active);

	if (atomic_read(&dev->pending_txurbs) % tx_urb_mult)
		txurb->transfer_flags |= URB_NO_INTERRUPT;

	result = usb_submit_urb(txurb, GFP_KERNEL);
	if (result < 0) {
		usb_unanchor_urb(txurb);
		atomic_dec(&dev->pending_txurbs);
		dev_err(&dev->intf->dev, "%s: submit URB error %d\n",
			__func__, result);
		goto free_urb;
	}

	dev->to_modem++;
	dev_dbg(&dev->intf->dev, "%s: pending_txurbs: %u\n",
		__func__, pending);

	/* flow control: the urb was accepted, but return -EBUSY so the
	 * sender throttles further writes
	 */
	if (fctrl_support && pending > fctrl_en_thld) {
		set_bit(TX_THROTTLED, &brdg->flags);
		dev->tx_throttled_cnt++;
		pr_debug_ratelimited("%s: enable flow ctrl pend txurbs:%u\n",
				__func__, pending);
		return -EBUSY;
	}

	return size;

free_urb:
	usb_free_urb(txurb);
error:
	dev->txurb_drp_cnt++;
	usb_autopm_put_interface(dev->intf);
pm_error:
	return result;
}
static void notification_available_cb(struct urb *urb)
{
	int status;
	struct usb_cdc_notification *ctrl;
	struct ctrl_bridge *dev = urb->context;
	struct bridge *brdg = dev->brdg;
	unsigned int ctrl_bits;
	unsigned char *data;

	switch (urb->status) {
	case 0:			/* success */
	case -ENOENT:		/* non-zero length data may still be
				 * received while the urb is unlinked */
		break;
	case -ESHUTDOWN:
	case -ECONNRESET:
	case -EPROTO:
		/* unplug */
		return;
	case -EPIPE:
		dev_err(&dev->intf->dev, "%s: stall on int endpoint\n",
			__func__);
		/* TBD : halt to be cleared in work */
	case -EOVERFLOW:
	default:
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
				__func__, urb->status);
		goto resubmit_int_urb;
	}

	if (!urb->actual_length)
		return;

	ctrl = (struct usb_cdc_notification *)urb->transfer_buffer;
	data = (unsigned char *)(ctrl + 1);

	switch (ctrl->bNotificationType) {
	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
		dev->resp_avail++;
		usb_mark_last_busy(dev->udev);
		if (urb->status == -ENOENT)
			pr_info("URB status is ENOENT");
		queue_work(dev->wq, &dev->get_encap_work);
		return;
	case USB_CDC_NOTIFY_NETWORK_CONNECTION:
		dev_dbg(&dev->intf->dev, "%s network\n", ctrl->wValue ?
				"connected to" : "disconnected from");
		break;
	case USB_CDC_NOTIFY_SERIAL_STATE:
		dev->notify_ser_state++;
		ctrl_bits = get_unaligned_le16(data);
		dev_dbg(&dev->intf->dev, "serial state: %d\n", ctrl_bits);
		dev->cbits_tohost = ctrl_bits;
		if (brdg && brdg->ops.send_cbits)
			brdg->ops.send_cbits(brdg->ctx, ctrl_bits);
		break;
	default:
		dev_err(&dev->intf->dev,
			"%s: unknown notification %d received: index %d len %d data0 %d data1 %d",
			__func__, ctrl->bNotificationType, ctrl->wIndex,
			ctrl->wLength, data[0], data[1]);
	}

resubmit_int_urb:
	usb_anchor_urb(urb, &dev->tx_submitted);
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		dev_err(&dev->intf->dev, "%s: Error re-submitting Int URB %d\n",
			__func__, status);
		usb_unanchor_urb(urb);
	}
}
/* Write */
static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
			const unsigned char *buf, int count)
{
	struct sierra_port_private *portdata;
	struct sierra_intf_private *intfdata;
	struct usb_serial *serial = port->serial;
	unsigned long flags;
	unsigned char *buffer;
	struct urb *urb;
	size_t writesize = min((size_t)count, (size_t)MAX_TRANSFER);
	int retval = 0;

	/* verify that we actually have some data to write */
	if (count == 0)
		return 0;

	portdata = usb_get_serial_port_data(port);
	intfdata = usb_get_serial_data(serial);

	dev_dbg(&port->dev, "%s: write (%zd bytes)\n", __func__, writesize);

	spin_lock_irqsave(&portdata->lock, flags);
	dev_dbg(&port->dev, "%s - outstanding_urbs: %d\n", __func__,
		portdata->outstanding_urbs);
	if (portdata->outstanding_urbs > portdata->num_out_urbs) {
		spin_unlock_irqrestore(&portdata->lock, flags);
		dev_dbg(&port->dev, "%s - write limit hit\n", __func__);
		return 0;
	}
	portdata->outstanding_urbs++;
	dev_dbg(&port->dev, "%s - 1, outstanding_urbs: %d\n", __func__,
		portdata->outstanding_urbs);
	spin_unlock_irqrestore(&portdata->lock, flags);

	retval = usb_autopm_get_interface_async(serial->interface);
	if (retval < 0) {
		spin_lock_irqsave(&portdata->lock, flags);
		portdata->outstanding_urbs--;
		spin_unlock_irqrestore(&portdata->lock, flags);
		goto error_simple;
	}

	buffer = kmalloc(writesize, GFP_ATOMIC);
	if (!buffer) {
		retval = -ENOMEM;
		goto error_no_buffer;
	}

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		retval = -ENOMEM;
		goto error_no_urb;
	}

	memcpy(buffer, buf, writesize);

	usb_serial_debug_data(&port->dev, __func__, writesize, buffer);

	usb_fill_bulk_urb(urb, serial->dev,
			  usb_sndbulkpipe(serial->dev,
					  port->bulk_out_endpointAddress),
			  buffer, writesize, sierra_outdat_callback, port);

	/* Handle the need to send a zero length packet */
	urb->transfer_flags |= URB_ZERO_PACKET;

	spin_lock_irqsave(&intfdata->susp_lock, flags);
	if (intfdata->suspended) {
		usb_anchor_urb(urb, &portdata->delayed);
		spin_unlock_irqrestore(&intfdata->susp_lock, flags);
		goto skip_power;
	} else {
		usb_anchor_urb(urb, &portdata->active);
	}

	/* send it down the pipe */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval) {
		usb_unanchor_urb(urb);
		spin_unlock_irqrestore(&intfdata->susp_lock, flags);
		dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed "
			"with status = %d\n", __func__, retval);
		goto error;
	} else {
		intfdata->in_flight++;
		spin_unlock_irqrestore(&intfdata->susp_lock, flags);
	}

skip_power:
	/* we are done with this urb, so let the host driver
	 * really free it when it is finished with it */
	usb_free_urb(urb);

	return writesize;

error:
	usb_free_urb(urb);
error_no_urb:
	kfree(buffer);
error_no_buffer:
	spin_lock_irqsave(&portdata->lock, flags);
	--portdata->outstanding_urbs;
	dev_dbg(&port->dev, "%s - 2. outstanding_urbs: %d\n", __func__,
		portdata->outstanding_urbs);
	spin_unlock_irqrestore(&portdata->lock, flags);
	usb_autopm_put_interface_async(serial->interface);
error_simple:
	return retval;
}
s32 rtl8192cu_hostap_mgnt_xmit_entry(_adapter *padapter, _pkt *pkt)
{
#ifdef PLATFORM_LINUX
	u16 fc;
	int rc, len, pipe;
	unsigned int bmcst, tid, qsel;
	struct sk_buff *skb, *pxmit_skb;
	struct urb *urb;
	unsigned char *pxmitbuf;
	struct tx_desc *ptxdesc;
	struct rtw_ieee80211_hdr *tx_hdr;
	struct hostapd_priv *phostapdpriv = padapter->phostapdpriv;
	struct net_device *pnetdev = padapter->pnetdev;
	HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter);
	struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);

	//DBG_8192C("%s\n", __FUNCTION__);

	skb = pkt;
	len = skb->len;
	tx_hdr = (struct rtw_ieee80211_hdr *)(skb->data);
	fc = le16_to_cpu(tx_hdr->frame_ctl);
	bmcst = IS_MCAST(tx_hdr->addr1);

	if ((fc & RTW_IEEE80211_FCTL_FTYPE) != RTW_IEEE80211_FTYPE_MGMT)
		goto _exit;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
	// http://www.mail-archive.com/[email protected]/msg17214.html
	pxmit_skb = dev_alloc_skb(len + TXDESC_SIZE);
#else
	pxmit_skb = netdev_alloc_skb(pnetdev, len + TXDESC_SIZE);
#endif
	if (!pxmit_skb)
		goto _exit;

	pxmitbuf = pxmit_skb->data;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		// free the copied frame before bailing out
		dev_kfree_skb_any(pxmit_skb);
		goto _exit;
	}

	// ----- fill tx desc -----
	ptxdesc = (struct tx_desc *)pxmitbuf;
	_rtw_memset(ptxdesc, 0, sizeof(*ptxdesc));

	// offset 0
	ptxdesc->txdw0 |= cpu_to_le32(len & 0x0000ffff);
	ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE + OFFSET_SZ) << OFFSET_SHT) & 0x00ff0000); //default = 32 bytes for TX Desc
	ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);

	if (bmcst)
		ptxdesc->txdw0 |= cpu_to_le32(BIT(24));

	// offset 4
	ptxdesc->txdw1 |= cpu_to_le32(0x00); //MAC_ID
	ptxdesc->txdw1 |= cpu_to_le32((0x12 << QSEL_SHT) & 0x00001f00);
	ptxdesc->txdw1 |= cpu_to_le32((0x06 << 16) & 0x000f0000); //b mode

	// offset 8

	// offset 12
	ptxdesc->txdw3 |= cpu_to_le32((le16_to_cpu(tx_hdr->seq_ctl) << 16) & 0xffff0000);

	// offset 16
	ptxdesc->txdw4 |= cpu_to_le32(BIT(8)); //driver uses rate

	// offset 20

	// HW append seq
	ptxdesc->txdw4 |= cpu_to_le32(BIT(7)); // Hw set sequence number
	ptxdesc->txdw3 |= cpu_to_le32((8 << 28)); //set bit3 to 1. Suggested by TimChen. 2009.12.29.

	rtl8192cu_cal_txdesc_chksum(ptxdesc);
	// ----- end of fill tx desc -----

	skb_put(pxmit_skb, len + TXDESC_SIZE);
	pxmitbuf = pxmitbuf + TXDESC_SIZE;
	_rtw_memcpy(pxmitbuf, skb->data, len);

	//DBG_8192C("mgnt_xmit, len=%x\n", pxmit_skb->len);

	// ----- prepare urb for submit -----
	// translate DMA FIFO addr to pipe handle
	//pipe = ffaddr2pipehdl(pdvobj, MGT_QUEUE_INX);
	pipe = usb_sndbulkpipe(pdvobj->pusbdev,
			pHalData->Queue2EPNum[(u8)MGT_QUEUE_INX] & 0x0f);

	usb_fill_bulk_urb(urb, pdvobj->pusbdev, pipe,
			pxmit_skb->data, pxmit_skb->len,
			rtl8192cu_hostap_mgnt_xmit_cb, pxmit_skb);

	urb->transfer_flags |= URB_ZERO_PACKET;
	usb_anchor_urb(urb, &phostapdpriv->anchored);
	rc = usb_submit_urb(urb, GFP_ATOMIC);
	if (rc < 0) {
		usb_unanchor_urb(urb);
		// free the copy, not the original: `skb` is released at
		// _exit, so freeing it here too would be a double free
		kfree_skb(pxmit_skb);
	}
	usb_free_urb(urb);

_exit:
	dev_kfree_skb_any(skb);
#endif
	return 0;
}
static ssize_t skel_write(struct file *file, const char __user *user_buffer,
			  size_t count, loff_t *ppos)
{
	struct usb_skel *dev;
	int retval = 0;
	struct urb *urb = NULL;
	char *buf = NULL;
	size_t writesize = min(count, (size_t)MAX_TRANSFER);

	dev = file->private_data;

	/* verify that we actually have some data to write */
	if (count == 0)
		goto exit;

	/*
	 * limit the number of URBs in flight to stop a user from using up all
	 * RAM
	 */
	if (!(file->f_flags & O_NONBLOCK)) {
		if (down_interruptible(&dev->limit_sem)) {
			retval = -ERESTARTSYS;
			goto exit;
		}
	} else {
		if (down_trylock(&dev->limit_sem)) {
			retval = -EAGAIN;
			goto exit;
		}
	}

	spin_lock_irq(&dev->err_lock);
	retval = dev->errors;
	if (retval < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		retval = (retval == -EPIPE) ? retval : -EIO;
	}
	spin_unlock_irq(&dev->err_lock);
	if (retval < 0)
		goto error;

	/* create a urb, and a buffer for it, and copy the data to the urb */
	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		retval = -ENOMEM;
		goto error;
	}

	buf = usb_alloc_coherent(dev->udev, writesize, GFP_KERNEL,
				 &urb->transfer_dma);
	if (!buf) {
		retval = -ENOMEM;
		goto error;
	}

	if (copy_from_user(buf, user_buffer, writesize)) {
		retval = -EFAULT;
		goto error;
	}

	/* this lock makes sure we don't submit URBs to gone devices */
	mutex_lock(&dev->io_mutex);
	if (!dev->interface) {		/* disconnect() was called */
		mutex_unlock(&dev->io_mutex);
		retval = -ENODEV;
		goto error;
	}

	/* initialize the urb properly */
	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr),
			  buf, writesize, skel_write_bulk_callback, dev);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(urb, &dev->submitted);

	/* send the data out the bulk port */
	retval = usb_submit_urb(urb, GFP_KERNEL);
	mutex_unlock(&dev->io_mutex);
	if (retval) {
		dev_err(&dev->interface->dev,
			"%s - failed submitting write urb, error %d\n",
			__func__, retval);
		goto error_unanchor;
	}

	/*
	 * release our reference to this urb, the USB core will eventually free
	 * it entirely
	 */
	usb_free_urb(urb);

	return writesize;

error_unanchor:
	usb_unanchor_urb(urb);
error:
	if (urb) {
		usb_free_coherent(dev->udev, writesize, buf,
				  urb->transfer_dma);
		usb_free_urb(urb);
	}
	up(&dev->limit_sem);
exit:
	return retval;
}
static void notification_available_cb(struct urb *urb)
{
	int status;
	struct usb_cdc_notification *ctrl;
	struct usb_device *udev;
	struct ctrl_bridge *dev = urb->context;
	struct bridge *brdg = dev->brdg;
	unsigned int ctrl_bits;
	unsigned char *data;

	udev = interface_to_usbdev(dev->intf);

	switch (urb->status) {
	case 0:
		/* success */
		break;
	case -ESHUTDOWN:
	case -ENOENT:
	case -ECONNRESET:
	case -EPROTO:
		/* unplug */
		return;
	case -EPIPE:
		dev_err(&udev->dev, "%s: stall on int endpoint\n", __func__);
		/* TBD : halt to be cleared in work */
	case -EOVERFLOW:
	default:
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
				__func__, urb->status);
		goto resubmit_int_urb;
	}

	ctrl = (struct usb_cdc_notification *)urb->transfer_buffer;
	data = (unsigned char *)(ctrl + 1);

	switch (ctrl->bNotificationType) {
	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
		dev->resp_avail++;
		usb_fill_control_urb(dev->readurb, udev,
				     usb_rcvctrlpipe(udev, 0),
				     (unsigned char *)dev->in_ctlreq,
				     dev->readbuf,
				     DEFAULT_READ_URB_LENGTH,
				     resp_avail_cb, dev);

		usb_anchor_urb(dev->readurb, &dev->tx_submitted);
		status = usb_submit_urb(dev->readurb, GFP_ATOMIC);
		if (status) {
			dev_err(&udev->dev,
				"%s: Error submitting Read URB %d\n",
				__func__, status);
			usb_unanchor_urb(dev->readurb);
			goto resubmit_int_urb;
		}
		return;
	case USB_CDC_NOTIFY_NETWORK_CONNECTION:
		dev_dbg(&udev->dev, "%s network\n", ctrl->wValue ?
				"connected to" : "disconnected from");
		break;
	case USB_CDC_NOTIFY_SERIAL_STATE:
		dev->notify_ser_state++;
		ctrl_bits = get_unaligned_le16(data);
		dev_dbg(&udev->dev, "serial state: %d\n", ctrl_bits);
		dev->cbits_tohost = ctrl_bits;
		if (brdg && brdg->ops.send_cbits)
			brdg->ops.send_cbits(brdg->ctx, ctrl_bits);
		break;
	default:
		dev_err(&udev->dev,
			"%s: unknown notification %d received: index %d len %d data0 %d data1 %d",
			__func__, ctrl->bNotificationType, ctrl->wIndex,
			ctrl->wLength, data[0], data[1]);
	}

resubmit_int_urb:
	usb_anchor_urb(urb, &dev->tx_submitted);
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		dev_err(&udev->dev, "%s: Error re-submitting Int URB %d\n",
			__func__, status);
		usb_unanchor_urb(urb);
	}
}
netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;
	int retval;

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err(dev)) {
				netif_dbg(dev, tx_err, dev->net,
					  "can't tx_fixup skb\n");
				goto drop;
			} else {
				/* cdc_ncm collected packet; waits for more */
				goto not_drop;
			}
		}
	}
	length = skb->len;

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 * NOTE2: CDC NCM specification is different from CDC ECM when
	 * handling ZLP/short packets, so cdc_ncm driver will make short
	 * packet itself if needed.
	 */
	if (length % dev->maxpacket == 0) {
		if (!(info->flags & FLAG_SEND_ZLP)) {
			if (!(info->flags & FLAG_MULTI_PACKET)) {
				urb->transfer_buffer_length++;
				if (skb_tailroom(skb)) {
					skb->data[skb->len] = 0;
					__skb_put(skb, 1);
				}
			}
		} else
			urb->transfer_flags |= URB_ZERO_PACKET;
	}

	spin_lock_irqsave(&dev->txq.lock, flags);
	retval = usb_autopm_get_interface_async(dev->intf);
	if (retval < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still asleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(net);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		goto deferred;
	}
#endif

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		usbnet_defer_kevent (dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail (&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
drop:
		dev->net->stats.tx_dropped++;
not_drop:
		if (skb)
			dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
#ifdef CONFIG_PM
deferred:
#endif
	return NETDEV_TX_OK;
}
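/*
 * The -EPIPE branch above only defers work: EVENT_TX_HALT is serviced
 * later from usbnet's keventd, which clears the endpoint stall and
 * restarts the queue.  A simplified sketch of that recovery step,
 * assuming the usbnet fields used above (mainline's kevent handler
 * also checks the clear-halt status code and handles the rx side):
 */
static void tx_halt_recover(struct usbnet *dev)
{
	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		/* usb_clear_halt() may sleep, hence process context */
		if (usb_clear_halt(dev->udev, dev->out) >= 0) {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			netif_wake_queue(dev->net);
		}
	}
}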
int ctrl_bridge_write(unsigned int id, char *data, size_t size)
{
	int result;
	struct urb *writeurb;
	struct usb_ctrlrequest *out_ctlreq;
	struct usb_device *udev;
	struct ctrl_bridge *dev;

	if (id >= MAX_BRIDGE_DEVICES) {
		result = -EINVAL;
		goto free_data;
	}

	dev = __dev[id];
	if (!dev) {
		result = -ENODEV;
		goto free_data;
	}

	udev = interface_to_usbdev(dev->intf);

	dev_dbg(&udev->dev, "%s:[id]:%u: write (%zu bytes)\n",
		__func__, id, size);

	writeurb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!writeurb) {
		dev_err(&udev->dev, "%s: error allocating write urb\n",
			__func__);
		result = -ENOMEM;
		goto free_data;
	}

	out_ctlreq = kmalloc(sizeof(*out_ctlreq), GFP_ATOMIC);
	if (!out_ctlreq) {
		dev_err(&udev->dev,
			"%s: error allocating setup packet buffer\n",
			__func__);
		result = -ENOMEM;
		goto free_urb;
	}

	/* CDC Send Encapsulated Request packet */
	out_ctlreq->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS |
				    USB_RECIP_INTERFACE);
	if (!data && !size) {
		out_ctlreq->bRequest = USB_CDC_REQ_SET_CONTROL_LINE_STATE;
		out_ctlreq->wValue = dev->cbits_tomdm;
		dev->set_ctrl_line_sts++;
	} else {
		out_ctlreq->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
		out_ctlreq->wValue = 0;
		dev->snd_encap_cmd++;
	}
	out_ctlreq->wIndex =
		dev->intf->cur_altsetting->desc.bInterfaceNumber;
	out_ctlreq->wLength = cpu_to_le16(size);

	usb_fill_control_urb(writeurb, udev, usb_sndctrlpipe(udev, 0),
			     (unsigned char *)out_ctlreq, (void *)data,
			     size, ctrl_write_callback, dev);

	result = usb_autopm_get_interface_async(dev->intf);
	if (result < 0) {
		dev_err(&udev->dev, "%s: unable to resume interface: %d\n",
			__func__, result);
		/*
		 * Revisit: if (result == -EPERM)
		 *		bridge_suspend(dev->intf, PMSG_SUSPEND);
		 */
		goto free_ctrlreq;
	}

	if (test_bit(SUSPENDED, &dev->flags)) {
		usb_anchor_urb(writeurb, &dev->tx_deferred);
		goto deferred;
	}

	usb_anchor_urb(writeurb, &dev->tx_submitted);
	result = usb_submit_urb(writeurb, GFP_ATOMIC);
	if (result < 0) {
		dev_err(&udev->dev, "%s: submit URB error %d\n",
			__func__, result);
		usb_autopm_put_interface_async(dev->intf);
		goto unanchor_urb;
	}
deferred:
	return size;

unanchor_urb:
	usb_unanchor_urb(writeurb);
free_ctrlreq:
	kfree(out_ctlreq);
free_urb:
	usb_free_urb(writeurb);
free_data:
	kfree(data);

	return result;
}
int diag_bridge_write(char *data, int size)
{
	struct urb *urb = NULL;
	unsigned int pipe;
	struct diag_bridge *dev = __dev;
	int ret;

	pr_debug("writing %d bytes", size);

	if (!dev || !dev->ifc) {
		pr_err("device is disconnected");
		return -ENODEV;
	}

	if (!dev->ops) {
		pr_err("bridge is not open");
		return -ENODEV;
	}

	if (!size) {
		dev_err(&dev->ifc->dev, "invalid size:%d\n", size);
		return -EINVAL;
	}

	/* if there was a previous unrecoverable error, just quit */
	if (dev->err)
		return -ENODEV;

	kref_get(&dev->kref);

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		dev_err(&dev->ifc->dev, "unable to allocate urb\n");
		ret = -ENOMEM;
		goto error;
	}

	ret = usb_autopm_get_interface(dev->ifc);
	if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
		pr_err_ratelimited("write: autopm_get failed:%d", ret);
		goto free_error;
	}

	pipe = usb_sndbulkpipe(dev->udev, dev->out_epAddr);
	usb_fill_bulk_urb(urb, dev->udev, pipe, data, size,
			  diag_bridge_write_cb, dev);
	urb->transfer_flags |= URB_ZERO_PACKET;
	usb_anchor_urb(urb, &dev->submitted);
	dev->pending_writes++;

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
#if defined(CONFIG_LGE_HANDLE_PANIC)	/* unchol.park */
		if (lge_pm_get_cable_type() == CABLE_130K)
			printk("[MDM TEST] usb_submit_urb failed. ret = %d\n",
			       ret);
#endif
		pr_err_ratelimited("submitting urb failed err:%d", ret);
		dev->pending_writes--;
		usb_unanchor_urb(urb);
		usb_autopm_put_interface(dev->ifc);
		goto free_error;
	}

free_error:
	usb_free_urb(urb);
error:
	if (ret)	/* otherwise this is done in the completion handler */
		kref_put(&dev->kref, diag_bridge_delete);
	return ret;
}
static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
				    struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *rxdesc = skb->data;
	struct ieee80211_hdr *hdr;
	bool unicast = false;
	__le16 fc;
	struct ieee80211_rx_status rx_status = {0};
	struct rtl_stats stats = {
		.signal = 0,
		.noise = -98,
		.rate = 0,
	};

	skb_pull(skb, RTL_RX_DESC_SIZE);
	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
	skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
	hdr = (struct ieee80211_hdr *)(skb->data);
	fc = hdr->frame_control;
	if (!stats.crc) {
		memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

		if (is_broadcast_ether_addr(hdr->addr1)) {
			/*TODO*/
		} else if (is_multicast_ether_addr(hdr->addr1)) {
			/*TODO*/
		} else {
			unicast = true;
			rtlpriv->stats.rxbytesunicast += skb->len;
		}

		rtl_is_special_data(hw, skb, false);

		if (ieee80211_is_data(fc)) {
			rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

			if (unicast)
				rtlpriv->link_info.num_rx_inperiod++;
		}
	}
}

static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
				      struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *rxdesc = skb->data;
	struct ieee80211_hdr *hdr;
	bool unicast = false;
	__le16 fc;
	struct ieee80211_rx_status rx_status = {0};
	struct rtl_stats stats = {
		.signal = 0,
		.noise = -98,
		.rate = 0,
	};

	skb_pull(skb, RTL_RX_DESC_SIZE);
	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
	skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
	hdr = (struct ieee80211_hdr *)(skb->data);
	fc = hdr->frame_control;
	if (!stats.crc) {
		memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

		if (is_broadcast_ether_addr(hdr->addr1)) {
			/*TODO*/
		} else if (is_multicast_ether_addr(hdr->addr1)) {
			/*TODO*/
		} else {
			unicast = true;
			rtlpriv->stats.rxbytesunicast += skb->len;
		}

		rtl_is_special_data(hw, skb, false);

		if (ieee80211_is_data(fc)) {
			rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

			if (unicast)
				rtlpriv->link_info.num_rx_inperiod++;
		}

		if (likely(rtl_action_proc(hw, skb, false))) {
			struct sk_buff *uskb = NULL;
			u8 *pdata;

			/* hand a copy to mac80211; if the allocation fails
			 * the frame is simply dropped */
			uskb = dev_alloc_skb(skb->len + 128);
			if (uskb) {
				memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
				       sizeof(rx_status));
				pdata = (u8 *)skb_put(uskb, skb->len);
				memcpy(pdata, skb->data, skb->len);
				ieee80211_rx_irqsafe(hw, uskb);
			}
			dev_kfree_skb_any(skb);
		} else {
			dev_kfree_skb_any(skb);
		}
	} else {
		/* drop frames that failed CRC */
		dev_kfree_skb_any(skb);
	}
}

static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct sk_buff *_skb;
	struct sk_buff_head rx_queue;
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	skb_queue_head_init(&rx_queue);
	if (rtlusb->usb_rx_segregate_hdl)
		rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue);
	WARN_ON(skb_queue_empty(&rx_queue));
	while (!skb_queue_empty(&rx_queue)) {
		_skb = skb_dequeue(&rx_queue);
		_rtl_usb_rx_process_agg(hw, _skb);
		ieee80211_rx_irqsafe(hw, _skb);
	}
}

static void _rtl_rx_completed(struct urb *_urb)
{
	struct sk_buff *skb = (struct sk_buff *)_urb->context;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0];
	struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int err = 0;

	if (unlikely(IS_USB_STOP(rtlusb)))
		goto free;

	if (likely(0 == _urb->status)) {
		/* Moving this processing to a work queue might improve CPU
		 * utilization.  Note that we allocate a fresh skb for the
		 * resubmitted URB rather than reusing the one just handed up.
		 */
		skb_put(skb, _urb->actual_length);
		if (likely(!rtlusb->usb_rx_segregate_hdl)) {
			struct sk_buff *_skb;

			_rtl_usb_rx_process_noagg(hw, skb);
			_skb = _rtl_prep_rx_urb(hw, rtlusb, _urb, GFP_ATOMIC);
			if (IS_ERR(_skb)) {
				err = PTR_ERR(_skb);
				RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
					 "Can't allocate skb for bulk IN!\n");
				return;
			}
			skb = _skb;
		} else {
			/* TO DO */
			_rtl_rx_pre_process(hw, skb);
			pr_err("rx agg not supported\n");
		}
		goto resubmit;
	}

	switch (_urb->status) {
	/* disconnect */
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		goto free;
	default:
		break;
	}

resubmit:
	skb_reset_tail_pointer(skb);
	skb_trim(skb, 0);

	usb_anchor_urb(_urb, &rtlusb->rx_submitted);
	err = usb_submit_urb(_urb, GFP_ATOMIC);
	if (unlikely(err)) {
		usb_unanchor_urb(_urb);
		goto free;
	}
	return;

free:
	dev_kfree_skb_irq(skb);
}

static int _rtl_usb_receive(struct ieee80211_hw *hw)
{
	struct urb *urb;
	struct sk_buff *skb;
	int err;
	int i;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	WARN_ON(0 == rtlusb->rx_urb_num);
	/* 1600 == 1514 + max WLAN header + rtk info */
	WARN_ON(rtlusb->rx_max_size < 1600);

	for (i = 0; i < rtlusb->rx_urb_num; i++) {
		err = -ENOMEM;
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Failed to alloc URB!!\n");
			goto err_out;
		}

		skb = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
		if (IS_ERR(skb)) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Failed to prep_rx_urb!!\n");
			err = PTR_ERR(skb);
			usb_free_urb(urb);	/* not yet anchored */
			goto err_out;
		}

		usb_anchor_urb(urb, &rtlusb->rx_submitted);
		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err) {
			usb_unanchor_urb(urb);
			usb_free_urb(urb);
			goto err_out;
		}
		/* drop the local reference; anchor and HCD keep theirs */
		usb_free_urb(urb);
	}
	return 0;

err_out:
	usb_kill_anchored_urbs(&rtlusb->rx_submitted);
	return err;
}

static int rtl_usb_start(struct ieee80211_hw *hw)
{
	int err;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	err = rtlpriv->cfg->ops->hw_init(hw);
	if (!err) {
		rtl_init_rx_config(hw);

		/* Enable software */
		SET_USB_START(rtlusb);
		/* should be set after adapter start and interrupt enable */
		set_hal_start(rtlhal);

		/* Start bulk IN */
		_rtl_usb_receive(hw);
	}

	return err;
}

/*======================= tx =========================================*/

static void rtl_usb_cleanup(struct ieee80211_hw *hw)
{
	u32 i;
	struct sk_buff *_skb;
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
	struct ieee80211_tx_info *txinfo;

	SET_USB_STOP(rtlusb);

	/* clean up rx stuff */
	usb_kill_anchored_urbs(&rtlusb->rx_submitted);

	/* clean up tx stuff */
	for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) {
		while ((_skb = skb_dequeue(&rtlusb->tx_skb_queue[i]))) {
			rtlusb->usb_tx_cleanup(hw, _skb);
			txinfo = IEEE80211_SKB_CB(_skb);
			ieee80211_tx_info_clear_status(txinfo);
			txinfo->flags |= IEEE80211_TX_STAT_ACK;
			ieee80211_tx_status_irqsafe(hw, _skb);
		}
		usb_kill_anchored_urbs(&rtlusb->tx_pending[i]);
	}
	usb_kill_anchored_urbs(&rtlusb->tx_submitted);
}
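/*
 * Hedged refactoring sketch, not driver code: _rtl_usb_rx_process_agg() and
 * _rtl_usb_rx_process_noagg() above duplicate all of the descriptor parsing
 * and accounting.  A shared helper along these lines could carry the common
 * part and report whether the frame passed CRC, leaving only the delivery
 * policy to the two callers.
 */
static bool _rtl_usb_rx_process_common(struct ieee80211_hw *hw,
				       struct sk_buff *skb,
				       struct ieee80211_rx_status *rx_status)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *rxdesc = skb->data;
	struct ieee80211_hdr *hdr;
	bool unicast = false;
	struct rtl_stats stats = {
		.signal = 0,
		.noise = -98,
		.rate = 0,
	};

	skb_pull(skb, RTL_RX_DESC_SIZE);
	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, rx_status, rxdesc, skb);
	skb_pull(skb, stats.rx_drvinfo_size + stats.rx_bufshift);
	hdr = (struct ieee80211_hdr *)skb->data;

	if (stats.crc)
		return false;

	memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status));

	if (!is_broadcast_ether_addr(hdr->addr1) &&
	    !is_multicast_ether_addr(hdr->addr1)) {
		unicast = true;
		rtlpriv->stats.rxbytesunicast += skb->len;
	}

	rtl_is_special_data(hw, skb, false);

	if (ieee80211_is_data(hdr->frame_control)) {
		rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
		if (unicast)
			rtlpriv->link_info.num_rx_inperiod++;
	}
	return true;
}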
static int gs_can_open(struct net_device *netdev)
{
	struct gs_can *dev = netdev_priv(netdev);
	struct gs_usb *parent = dev->parent;
	int rc, i;
	struct gs_device_mode *dm;
	u32 ctrlmode;

	rc = open_candev(netdev);
	if (rc)
		return rc;

	if (atomic_add_return(1, &parent->active_channels) == 1) {
		for (i = 0; i < GS_MAX_RX_URBS; i++) {
			struct urb *urb;
			u8 *buf;

			/* alloc rx urb */
			urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!urb) {
				netdev_err(netdev, "No memory left for URB\n");
				return -ENOMEM;
			}

			/* alloc rx buffer */
			buf = usb_alloc_coherent(dev->udev,
						 sizeof(struct gs_host_frame),
						 GFP_KERNEL,
						 &urb->transfer_dma);
			if (!buf) {
				netdev_err(netdev,
					   "No memory left for USB buffer\n");
				usb_free_urb(urb);
				return -ENOMEM;
			}

			/* fill, anchor, and submit rx urb */
			usb_fill_bulk_urb(urb,
					  dev->udev,
					  usb_rcvbulkpipe(dev->udev,
							  GSUSB_ENDPOINT_IN),
					  buf,
					  sizeof(struct gs_host_frame),
					  gs_usb_recieve_bulk_callback,
					  parent);
			urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

			usb_anchor_urb(urb, &parent->rx_submitted);

			rc = usb_submit_urb(urb, GFP_KERNEL);
			if (rc) {
				if (rc == -ENODEV)
					netif_device_detach(dev->netdev);

				netdev_err(netdev,
					   "usb_submit failed (err=%d)\n", rc);

				/* on failure the URB is still ours: unanchor
				 * it and release the buffer as well */
				usb_unanchor_urb(urb);
				usb_free_coherent(dev->udev,
						  sizeof(struct gs_host_frame),
						  buf, urb->transfer_dma);
				usb_free_urb(urb);
				break;
			}

			/* Drop reference;
			 * USB core will take care of freeing it
			 */
			usb_free_urb(urb);
		}
	}

	dm = kmalloc(sizeof(*dm), GFP_KERNEL);
	if (!dm)
		return -ENOMEM;

	/* flags */
	ctrlmode = dev->can.ctrlmode;
	dm->flags = 0;

	if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
		dm->flags |= GS_CAN_MODE_LOOP_BACK;
	else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
		dm->flags |= GS_CAN_MODE_LISTEN_ONLY;

	/* Controller is not allowed to retry TX;
	 * this mode is unavailable on Atmel's UC3C hardware */
	if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		dm->flags |= GS_CAN_MODE_ONE_SHOT;

	if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
		dm->flags |= GS_CAN_MODE_TRIPLE_SAMPLE;

	/* finally start device */
	dm->mode = GS_CAN_MODE_START;
	rc = usb_control_msg(interface_to_usbdev(dev->iface),
			     usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
			     GS_USB_BREQ_MODE,
			     USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			     dev->channel, 0, dm, sizeof(*dm), 1000);
	if (rc < 0) {
		netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
		kfree(dm);
		return rc;
	}

	kfree(dm);

	dev->can.state = CAN_STATE_ERROR_ACTIVE;

	if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
		netif_start_queue(netdev);

	return 0;
}
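/*
 * Hedged counterpart sketch: on close, the last active channel would need to
 * reap the shared RX URBs anchored above.  Names follow gs_can_open(); this
 * illustrates the anchor teardown, it is not the driver's gs_can_close().
 */
static int gs_can_close_sketch(struct net_device *netdev)
{
	struct gs_can *dev = netdev_priv(netdev);
	struct gs_usb *parent = dev->parent;

	netif_stop_queue(netdev);

	/* stop polling: killing the anchor cancels every RX URB in flight */
	if (atomic_dec_and_test(&parent->active_channels))
		usb_kill_anchored_urbs(&parent->rx_submitted);

	close_candev(netdev);

	return 0;
}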
static int bpa10x_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	struct bpa10x_data *data = hdev->driver_data;
	struct usb_ctrlrequest *dr;
	struct urb *urb;
	unsigned int pipe;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RUNNING, &hdev->flags))
		return -EBUSY;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	/* Prepend skb with frame type */
	*skb_push(skb, 1) = bt_cb(skb)->pkt_type;

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
		if (!dr) {
			usb_free_urb(urb);
			return -ENOMEM;
		}

		dr->bRequestType = USB_TYPE_VENDOR;
		dr->bRequest     = 0;
		dr->wIndex       = 0;
		dr->wValue       = 0;
		dr->wLength      = __cpu_to_le16(skb->len);

		pipe = usb_sndctrlpipe(data->udev, 0x00);

		usb_fill_control_urb(urb, data->udev, pipe, (void *) dr,
				     skb->data, skb->len,
				     bpa10x_tx_complete, skb);

		hdev->stat.cmd_tx++;
		break;

	case HCI_ACLDATA_PKT:
		pipe = usb_sndbulkpipe(data->udev, 0x02);

		usb_fill_bulk_urb(urb, data->udev, pipe,
				  skb->data, skb->len,
				  bpa10x_tx_complete, skb);

		hdev->stat.acl_tx++;
		break;

	case HCI_SCODATA_PKT:
		pipe = usb_sndbulkpipe(data->udev, 0x02);

		usb_fill_bulk_urb(urb, data->udev, pipe,
				  skb->data, skb->len,
				  bpa10x_tx_complete, skb);

		hdev->stat.sco_tx++;
		break;

	default:
		usb_free_urb(urb);
		return -EILSEQ;
	}

	usb_anchor_urb(urb, &data->tx_anchor);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0) {
		BT_ERR("%s urb %p submission failed", hdev->name, urb);
		kfree(urb->setup_packet);
		usb_unanchor_urb(urb);
	}

	usb_free_urb(urb);

	/* propagate a submission failure instead of reporting success */
	return err;
}
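/*
 * Hedged sketch of the completion side: bpa10x_tx_complete() is referenced
 * above but not shown here.  A handler consistent with the submit path would
 * account the transfer and free both the setup packet (control URBs only)
 * and the skb passed as context.  This is an illustration, not the driver's
 * actual code.
 */
static void bpa10x_tx_complete_sketch(struct urb *urb)
{
	struct sk_buff *skb = urb->context;
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	if (!urb->status)
		hdev->stat.byte_tx += urb->transfer_buffer_length;
	else
		hdev->stat.err_tx++;

done:
	kfree(urb->setup_packet);	/* NULL for bulk URBs; kfree(NULL) is safe */
	kfree_skb(skb);
}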
static void notification_available_cb(struct urb *urb)
{
	int status;
	struct usb_cdc_notification *ctrl;
	struct usb_device *udev;
	struct rmnet_ctrl_dev *dev = urb->context;
	unsigned int iface_num;

	udev = interface_to_usbdev(dev->intf);
	iface_num = dev->intf->cur_altsetting->desc.bInterfaceNumber;

	switch (urb->status) {
	case 0:
	/* a non-zero length of data may be received while unlinking */
	case -ENOENT:
		/* success */
		DBG_NOTI("[NACB:%d]<", iface_num);
		break;

	/* do not resubmit */
	case -ESHUTDOWN:
	case -ECONNRESET:
	case -EPROTO:
		return;
	case -EPIPE:
		pr_err_ratelimited("%s: Stall on int endpoint\n", __func__);
		/* TBD : halt to be cleared in work */
		return;

	/* resubmit */
	case -EOVERFLOW:
		pr_err_ratelimited("%s: Babble error happened\n", __func__);
		/* fall through */
	default:
		pr_debug_ratelimited("%s: Non zero urb status = %d\n",
				     __func__, urb->status);
		goto resubmit_int_urb;
	}

	if (!urb->actual_length) {
		pr_err("Received zero-length notification\n");
		return;
	}

	ctrl = urb->transfer_buffer;

	switch (ctrl->bNotificationType) {
	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
		dev->resp_avail_cnt++;
		usb_mark_last_busy(udev);

		if (urb->status == -ENOENT)
			pr_info("URB status is ENOENT");

		queue_work(dev->wq, &dev->get_encap_work);

		if (!dev->resp_available) {
			dev->resp_available = true;
			wake_up(&dev->open_wait_queue);
		}
		return;
	default:
		dev_err(dev->devicep, "%s: Command not implemented\n",
			__func__);
	}

resubmit_int_urb:
	usb_anchor_urb(urb, &dev->rx_submitted);
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		usb_unanchor_urb(urb);
		dev_err(dev->devicep, "%s: Error re-submitting Int URB %d\n",
			__func__, status);
	}
}
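/*
 * Hedged setup sketch: the callback above services an interrupt URB on the
 * CDC notification endpoint.  A plausible initial submission, following the
 * anchor and context conventions used by notification_available_cb().  The
 * function name and the endpoint-descriptor parameter are assumptions for
 * illustration, not the driver's API.
 */
static struct urb *rmnet_usb_submit_notify_urb_sketch(
		struct rmnet_ctrl_dev *dev,
		struct usb_endpoint_descriptor *int_ep)
{
	struct usb_device *udev = interface_to_usbdev(dev->intf);
	u16 wmax = le16_to_cpu(int_ep->wMaxPacketSize);
	struct urb *urb;
	void *buf;
	int ret;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		return ERR_PTR(-ENOMEM);

	buf = kmalloc(wmax, GFP_KERNEL);
	if (!buf) {
		usb_free_urb(urb);
		return ERR_PTR(-ENOMEM);
	}

	usb_fill_int_urb(urb, udev,
			 usb_rcvintpipe(udev, int_ep->bEndpointAddress),
			 buf, wmax, notification_available_cb, dev,
			 int_ep->bInterval);

	usb_anchor_urb(urb, &dev->rx_submitted);
	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		usb_unanchor_urb(urb);
		kfree(buf);
		usb_free_urb(urb);
		return ERR_PTR(ret);
	}

	/* caller keeps this reference; urb and buffer freed at teardown */
	return urb;
}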
static int data_bridge_write_asus(unsigned int id, struct sk_buff *skb)
{
	int result;
	int size = skb->len;
	int pending;
	struct urb *txurb;
	struct timestamp_info *info = (struct timestamp_info *)skb->cb;
	struct data_bridge *dev = __dev[id];

	if (!dev || dev->err || !usb_get_intfdata(dev->intf))
		return -ENODEV;

	dev_dbg(&dev->intf->dev, "%s: write (%d bytes)\n", __func__, skb->len);

	result = usb_autopm_get_interface(dev->intf);
	if (result < 0) {
		dev_dbg(&dev->intf->dev, "%s: resume failure\n", __func__);
		goto pm_error;
	}

	txurb = usb_alloc_urb(0, GFP_KERNEL);
	if (!txurb) {
		dev_err(&dev->intf->dev, "%s: error allocating write urb\n",
			__func__);
		result = -ENOMEM;
		goto error;
	}

	/* store dev pointer in skb */
	info->dev = dev;
	info->tx_queued = get_timestamp();

	usb_fill_bulk_urb(txurb, dev->udev, dev->bulk_out,
			  skb->data, skb->len, data_bridge_write_cb, skb);

	txurb->transfer_flags |= URB_ZERO_PACKET;

	pending = atomic_inc_return(&dev->pending_txurbs);
	usb_anchor_urb(txurb, &dev->tx_active);

	/* only request a completion interrupt on every tx_urb_mult-th urb */
	if (atomic_read(&dev->pending_txurbs) % tx_urb_mult)
		txurb->transfer_flags |= URB_NO_INTERRUPT;

	result = usb_submit_urb(txurb, GFP_KERNEL);
	if (result < 0) {
		usb_unanchor_urb(txurb);
		atomic_dec(&dev->pending_txurbs);
		dev_err(&dev->intf->dev, "%s: submit URB error %d\n",
			__func__, result);
		goto free_urb;
	}

	dev->to_modem++;
	dev_dbg(&dev->intf->dev, "%s: pending_txurbs: %u\n", __func__, pending);

	return size;

free_urb:
	usb_free_urb(txurb);
error:
	dev->txurb_drp_cnt++;
	usb_autopm_put_interface(dev->intf);
pm_error:
	return result;
}
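/*
 * Hedged sketch (not the driver's actual data_bridge_write_cb): a completion
 * handler matching the write above would recover the device pointer stashed
 * in the skb control block, release the local URB reference and the skb,
 * then undo the pending count and autopm reference taken at submit time.
 */
static void data_bridge_write_cb_sketch(struct urb *urb)
{
	struct sk_buff *skb = urb->context;
	struct timestamp_info *info = (struct timestamp_info *)skb->cb;
	struct data_bridge *dev = info->dev;

	/* treat protocol/shutdown errors as unrecoverable for later writes */
	if (urb->status == -EPROTO || urb->status == -ESHUTDOWN)
		dev->err = urb->status;

	usb_free_urb(urb);	/* release the reference taken at allocation */
	dev_kfree_skb_any(skb);

	atomic_dec(&dev->pending_txurbs);
	usb_autopm_put_interface_async(dev->intf);
}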
static int rmnet_usb_ctrl_write(struct rmnet_ctrl_dev *dev, char *buf,
				size_t size)
{
	int result;
	struct urb *sndurb;
	struct usb_ctrlrequest *out_ctlreq;
	struct usb_device *udev;

	if (!is_dev_connected(dev))
		return -ENETRESET;

	udev = interface_to_usbdev(dev->intf);

	sndurb = usb_alloc_urb(0, GFP_KERNEL);
	if (!sndurb) {
		dev_err(dev->devicep, "Error allocating write urb\n");
		return -ENOMEM;
	}

	out_ctlreq = kmalloc(sizeof(*out_ctlreq), GFP_KERNEL);
	if (!out_ctlreq) {
		usb_free_urb(sndurb);
		dev_err(dev->devicep, "Error allocating setup packet buffer\n");
		return -ENOMEM;
	}

	/* CDC Send Encapsulated Request packet */
	out_ctlreq->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS |
				    USB_RECIP_INTERFACE);
	out_ctlreq->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
	out_ctlreq->wValue = 0;
	out_ctlreq->wIndex = dev->intf->cur_altsetting->desc.bInterfaceNumber;
	out_ctlreq->wLength = cpu_to_le16(size);

	usb_fill_control_urb(sndurb, udev,
			     usb_sndctrlpipe(udev, 0),
			     (unsigned char *)out_ctlreq, (void *)buf, size,
			     ctrl_write_callback, dev);

	result = usb_autopm_get_interface(dev->intf);
	if (result < 0) {
		dev_dbg(dev->devicep, "%s: Unable to resume interface: %d\n",
			__func__, result);

		/*
		 * Revisit: if (result == -EPERM)
		 *	rmnet_usb_suspend(dev->intf, PMSG_SUSPEND);
		 */
		usb_free_urb(sndurb);
		kfree(out_ctlreq);
		return result;
	}

	usb_anchor_urb(sndurb, &dev->tx_submitted);
	dev->snd_encap_cmd_cnt++;
	result = usb_submit_urb(sndurb, GFP_KERNEL);
	if (result < 0) {
		dev_err(dev->devicep, "%s: Submit URB error %d\n",
			__func__, result);
		dev->snd_encap_cmd_cnt--;
		usb_autopm_put_interface(dev->intf);
		usb_unanchor_urb(sndurb);
		usb_free_urb(sndurb);
		kfree(out_ctlreq);
		return result;
	}

	return size;
}
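/*
 * Hedged sketch of ctrl_write_callback(), which is referenced above but not
 * shown: on completion it would free the setup packet and the data buffer
 * allocated by the submit path, drop the URB reference, and release the
 * autopm reference taken in rmnet_usb_ctrl_write().
 */
static void ctrl_write_callback_sketch(struct urb *urb)
{
	struct rmnet_ctrl_dev *dev = urb->context;

	if (urb->status)
		dev_dbg(dev->devicep, "write urb failed: %d\n", urb->status);

	kfree(urb->setup_packet);	/* out_ctlreq */
	kfree(urb->transfer_buffer);	/* assumes buf was kmalloc'd by caller */
	usb_free_urb(urb);
	usb_autopm_put_interface_async(dev->intf);
}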
static void ksb_start_rx_work(struct work_struct *w)
{
	struct ks_bridge *ksb =
			container_of(w, struct ks_bridge, start_rx_work);
	struct data_pkt *pkt;
	struct urb *urb;
	int i = 0;
	int ret;
	bool put = true;

	ret = usb_autopm_get_interface(ksb->ifc);
	if (ret < 0) {
		if (ret != -EAGAIN && ret != -EACCES) {
			pr_err_ratelimited("%s: autopm_get failed:%d",
					   ksb->fs_dev.name, ret);
			return;
		}
		put = false;
	}

	for (i = 0; i < NO_RX_REQS; i++) {
		if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
			break;

		pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_KERNEL, ksb);
		if (IS_ERR(pkt)) {
			dev_err(&ksb->udev->dev, "unable to allocate data pkt");
			break;
		}

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			dev_err(&ksb->udev->dev, "unable to allocate urb");
			ksb_free_data_pkt(pkt);
			break;
		}

		usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
				  pkt->buf, pkt->len,
				  ksb_rx_cb, pkt);
		usb_anchor_urb(urb, &ksb->submitted);

		dbg_log_event(ksb, "S RX_URB", pkt->len, 0);

		atomic_inc(&ksb->rx_pending_cnt);
		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&ksb->udev->dev, "in urb submission failed");
			usb_unanchor_urb(urb);
			usb_free_urb(urb);
			ksb_free_data_pkt(pkt);
			atomic_dec(&ksb->rx_pending_cnt);
			wake_up(&ksb->pending_urb_wait);
			break;
		}

		/* drop the local reference; anchor and HCD keep theirs */
		usb_free_urb(urb);
	}

	if (put)
		usb_autopm_put_interface_async(ksb->ifc);
}
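/*
 * Hedged teardown sketch: disconnect would cancel the anchored RX URBs and
 * wait for the pending counter (incremented above) to drain before tearing
 * down the bridge.  Names follow ksb_start_rx_work(); this is an
 * illustration, not the driver's actual disconnect path.
 */
static void ksb_stop_rx_sketch(struct ks_bridge *ksb)
{
	clear_bit(USB_DEV_CONNECTED, &ksb->flags);

	/* cancel in-flight bulk IN URBs; completions run with -ENOENT */
	usb_kill_anchored_urbs(&ksb->submitted);

	/* ksb_rx_cb is assumed to decrement rx_pending_cnt for each
	 * reaped URB and wake this queue, as the submit-failure path
	 * above does */
	wait_event(ksb->pending_urb_wait,
		   !atomic_read(&ksb->rx_pending_cnt));
}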