static int sipc4_check_data(struct sipc4_rx_data *data, char *buf, int rest, const __be16 protocol) { struct sk_buff *skb = data->skb; struct sipc4_rx_frag *frag = (struct sipc4_rx_frag *)skb->cb; struct page *page; int header_size = sipc4_get_header_size(data->format); int hdlc_size = sipc4_get_hdlc_size(skb->data, data->format); int data_size = hdlc_size - header_size; int skb_data_size = skb->data_len + frag->data_len; int rest_data_size = data_size - skb_data_size; int len; int skb_max_pages = MAX_SKB_FRAGS; len = rest < rest_data_size ? rest : rest_data_size; page = __netdev_alloc_page(data->dev, GFP_ATOMIC); if (!page) { pr_err("%s - failed to alloc netdev page\n", __func__); return -ENOMEM; } if (data->format == SIPC4_RFS) skb_max_pages = 1; /* check fragment number */ if (skb_shinfo(skb)->nr_frags >= skb_max_pages) { struct sk_buff *skb_new; int err; skb_new = sipc4_alloc_skb(data, header_size); if (!skb) { pr_err("%s - failed to alloc skb\n", __func__); return -ENOMEM; } memcpy(skb_put(skb_new, header_size), skb->data, header_size); memcpy(skb_new->cb, skb->cb, sizeof(struct sipc4_rx_frag)); frag = (struct sipc4_rx_frag *)skb_new->cb; frag->data_len += skb->data_len; err = sipc4_hdlc_format_rx(data, protocol); if (err < 0) { pr_err("%s - failed to rx data\n", __func__); dev_kfree_skb_any(skb_new); return err; } skb = data->skb = skb_new; } /* handle data */ memcpy(page_address(page), buf, len); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, len); return len; }
/*
 * Queue one bulk-IN URB backed by a freshly allocated netdev page.
 *
 * @pnd:       Phonet-over-USB device context (supplies usb device and pipe).
 * @req:       URB to (re)fill and submit.
 * @gfp_flags: allocation context flags for the page and the submission.
 *
 * On submission failure the page is released again; ownership otherwise
 * passes to the URB completion path (rx_complete).
 *
 * Returns 0 on success or a negative errno.
 */
static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
{
	struct net_device *dev = pnd->dev;
	struct page *rx_page;
	int ret;

	rx_page = __netdev_alloc_page(dev, gfp_flags);
	if (!rx_page)
		return -ENOMEM;

	usb_fill_bulk_urb(req, pnd->usb, pnd->rx_pipe,
			  page_address(rx_page), PAGE_SIZE, rx_complete, dev);
	req->transfer_flags = 0;

	ret = usb_submit_urb(req, gfp_flags);
	if (unlikely(ret)) {
		/* Submission failed: the completion handler will never run,
		 * so the page must be given back here. */
		dev_dbg(&dev->dev, "RX submit error (%d)\n", ret);
		netdev_free_page(dev, rx_page);
	}

	return ret;
}
/*
 * Queue one bulk-IN URB for a given channel of the usbsvn device.
 *
 * @svn:       driver context (usb device, per-channel pipes, netdev).
 * @dev_id:    channel index into svn->devdata[].
 * @req:       URB to (re)fill and submit.
 * @gfp_flags: allocation context flags.
 *
 * Allocates a per-URB context (struct usbsvn_rx) plus a netdev page for the
 * transfer buffer; both are handed to rx_complete() on success and released
 * here when submission fails.  usb_mark_last_busy() is called on either
 * outcome, matching the original behavior.
 *
 * Returns 0 on success or a negative errno.
 */
static int rx_submit(struct usbsvn *svn, int dev_id, struct urb *req,
		     gfp_t gfp_flags)
{
	struct net_device *dev = svn->netdev;
	struct usbsvn_devdata *devdata = &svn->devdata[dev_id];
	struct usbsvn_rx *rx_ctx;
	struct page *rx_page;
	int ret;

	rx_ctx = kzalloc(sizeof(*rx_ctx), gfp_flags);
	if (!rx_ctx)
		return -ENOMEM;

	rx_page = __netdev_alloc_page(dev, gfp_flags);
	if (!rx_page) {
		kfree(rx_ctx);
		return -ENOMEM;
	}

	/* The completion handler needs to know which channel this was. */
	rx_ctx->netdev = dev;
	rx_ctx->dev_id = dev_id;

	usb_fill_bulk_urb(req, svn->usbdev, devdata->rx_pipe,
			  page_address(rx_page), PAGE_SIZE,
			  rx_complete, rx_ctx);
	req->transfer_flags = 0;

	ret = usb_submit_urb(req, gfp_flags);
	if (unlikely(ret)) {
		/* rx_complete() will never fire; reclaim both resources. */
		dev_err(&dev->dev, "RX submit error (%d)\n", ret);
		kfree(rx_ctx);
		netdev_free_page(dev, rx_page);
	}

	usb_mark_last_busy(req->dev);
	return ret;
}