/* TX-path tasklet (bottom half): while the aggregation timer is still
 * pending, re-arm it; otherwise flush the accumulated NTB by kicking
 * the usbnet transmit path with a NULL skb.
 */
static void cdc_ncm_txpath_bh(unsigned long param)
{
	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)param;

	spin_lock_bh(&ctx->mtx);

	/* Timer still pending: consume one tick and restart it. */
	if (ctx->tx_timer_pending != 0) {
		ctx->tx_timer_pending--;
		cdc_ncm_tx_timeout_start(ctx);
		spin_unlock_bh(&ctx->mtx);
		return;
	}

	/* No netdev registered: nothing to flush. */
	if (ctx->netdev == NULL) {
		spin_unlock_bh(&ctx->mtx);
		return;
	}

	/* Drop ctx->mtx before taking the netdev TX lock, matching the
	 * original lock ordering.
	 */
	spin_unlock_bh(&ctx->mtx);
	netif_tx_lock_bh(ctx->netdev);
	usbnet_start_xmit(NULL, ctx->netdev);
	netif_tx_unlock_bh(ctx->netdev);
}
static void cdc_ncm_txpath_bh(unsigned long param) { struct if_usb_devdata *pipe_data = (struct if_usb_devdata *)param; struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)pipe_data->sedata; if (!ctx) return; spin_lock_bh(&ctx->mtx); if (ctx->tx_timer_pending != 0) { ctx->tx_timer_pending--; cdc_ncm_tx_timeout_start(ctx); spin_unlock_bh(&ctx->mtx); } else if (pipe_data->iod->ndev != NULL) { spin_unlock_bh(&ctx->mtx); netif_tx_lock_bh(pipe_data->iod->ndev); usb_tx_skb(pipe_data, NULL); netif_tx_unlock_bh(pipe_data->iod->ndev); } else { spin_unlock_bh(&ctx->mtx); } }
/**
 * cdc_ncm_fill_tx_frame - aggregate datagrams into a 16-bit NTB
 * @ctx:  NCM driver context holding all tx_* aggregation state
 * @skb:  new datagram to queue, or NULL to flush the current frame
 * @sign: NDP signature selecting/creating the NDP this skb belongs to
 *
 * Builds an NCM Transfer Block: one NTH16 header followed by payloads
 * and one or more NDP16 index tables (created via cdc_ncm_ndp()).
 * @skb is always consumed - it is either copied into the NTB, parked
 * in ctx->tx_rem_skb for the next call, or dropped (tx_dropped++).
 *
 * Returns the finished NTB skb ready for submission, or NULL while the
 * frame is still accumulating; in the latter case the TX timeout timer
 * is (re)started.
 *
 * NOTE(review): mutates ctx->tx_* without taking a lock here -
 * presumably the caller holds ctx->mtx; confirm against call sites.
 */
struct sk_buff *
cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
{
	struct usb_cdc_ncm_nth16 *nth16;
	struct usb_cdc_ncm_ndp16 *ndp16;
	struct sk_buff *skb_out;
	u16 n = 0, index, ndplen;
	u8 ready2send = 0;

	/* if there is a remaining skb, it gets priority */
	if (skb != NULL) {
		swap(skb, ctx->tx_rem_skb);
		swap(sign, ctx->tx_rem_sign);
	} else {
		/* NULL skb is the "flush now" request */
		ready2send = 1;
	}

	/* check if we are resuming an OUT skb */
	skb_out = ctx->tx_curr_skb;

	/* allocate a new OUT skb */
	if (!skb_out) {
		/* +1 leaves room for the optional short-packet pad byte
		 * appended below to avoid a ZLP
		 */
		skb_out = alloc_skb((ctx->tx_max + 1), GFP_ATOMIC);
		if (skb_out == NULL) {
			if (skb != NULL) {
				dev_kfree_skb_any(skb);
				ctx->netdev->stats.tx_dropped++;
			}
			goto exit_no_skb;
		}
		/* fill out the initial 16-bit NTB header */
		nth16 = (struct usb_cdc_ncm_nth16 *)memset(skb_put(skb_out,
					sizeof(struct usb_cdc_ncm_nth16)), 0,
					sizeof(struct usb_cdc_ncm_nth16));
		nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
		nth16->wHeaderLength =
				cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
		nth16->wSequence = cpu_to_le16(ctx->tx_seq++);

		/* count total number of frames in this NTB */
		ctx->tx_curr_frame_num = 0;
	}

	for (n = ctx->tx_curr_frame_num; n < ctx->tx_max_datagrams; n++) {
		/* send any remaining skb first */
		if (skb == NULL) {
			skb = ctx->tx_rem_skb;
			sign = ctx->tx_rem_sign;
			ctx->tx_rem_skb = NULL;

			/* check for end of skb */
			if (skb == NULL)
				break;
		}

		/* get the appropriate NDP for this skb */
		ndp16 = cdc_ncm_ndp(ctx, skb_out, sign,
				skb->len + ctx->tx_modulus + ctx->tx_remainder);

		/* align beginning of next frame */
		cdc_ncm_align_tail(skb_out, ctx->tx_modulus,
				ctx->tx_remainder, ctx->tx_max);

		/* check if we had enough room left for both NDP and frame */
		if (!ndp16 || skb_out->len + skb->len > ctx->tx_max) {
			if (n == 0) {
				/* won't fit, MTU problem? */
				dev_kfree_skb_any(skb);
				skb = NULL;
				ctx->netdev->stats.tx_dropped++;
			} else {
				/* no room for skb - store for later */
				if (ctx->tx_rem_skb != NULL) {
					dev_kfree_skb_any(ctx->tx_rem_skb);
					ctx->netdev->stats.tx_dropped++;
				}
				ctx->tx_rem_skb = skb;
				ctx->tx_rem_sign = sign;
				skb = NULL;
				ready2send = 1;
			}
			break;
		}

		/* calculate frame number within this NDP:
		 * wLength already counts this entry, so back off by one
		 */
		ndplen = le16_to_cpu(ndp16->wLength);
		index = (ndplen - sizeof(struct usb_cdc_ncm_ndp16)) /
				sizeof(struct usb_cdc_ncm_dpe16) - 1;

		/* OK, add this skb */
		ndp16->dpe16[index].wDatagramLength = cpu_to_le16(skb->len);
		ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
		ndp16->wLength = cpu_to_le16(ndplen +
					sizeof(struct usb_cdc_ncm_dpe16));
		memcpy(skb_put(skb_out, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = NULL;

		/* send now if this NDP is full */
		if (index >= CDC_NCM_DPT_DATAGRAMS_MAX) {
			ready2send = 1;
			break;
		}
	}

	/* free up any dangling skb */
	if (skb != NULL) {
		dev_kfree_skb_any(skb);
		skb = NULL;
		ctx->netdev->stats.tx_dropped++;
	}

	ctx->tx_curr_frame_num = n;

	if (n == 0) {
		/* wait for more frames */
		/* push variables */
		ctx->tx_curr_skb = skb_out;
		goto exit_no_skb;

	} else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
		/* wait for more frames */
		/* push variables */
		ctx->tx_curr_skb = skb_out;
		/* set the pending count */
		if (n < CDC_NCM_RESTART_TIMER_DATAGRAM_CNT)
			ctx->tx_timer_pending = CDC_NCM_TIMER_PENDING_CNT;
		goto exit_no_skb;

	} else {
		/* frame goes out */
		/* variables will be reset at next call */
	}

	/*
	 * If collected data size is less or equal CDC_NCM_MIN_TX_PKT bytes,
	 * we send buffers as it is. If we get more data, it would be more
	 * efficient for USB HS mobile device with DMA engine to receive a full
	 * size NTB, than canceling DMA transfer and receiving a short packet.
	 */
	if (skb_out->len > CDC_NCM_MIN_TX_PKT)
		/* final zero padding */
		memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
					ctx->tx_max - skb_out->len);

	/* do we need to prevent a ZLP? */
	if (((skb_out->len %
			le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0) &&
	    (skb_out->len < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)) &&
	    skb_tailroom(skb_out))
		*skb_put(skb_out, 1) = 0;	/* force short packet */

	/* set final frame length */
	nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
	nth16->wBlockLength = cpu_to_le16(skb_out->len);

	/* return skb */
	ctx->tx_curr_skb = NULL;
	ctx->netdev->stats.tx_packets += ctx->tx_curr_frame_num;
	return skb_out;

exit_no_skb:
	/* Start timer, if there is a remaining skb */
	if (ctx->tx_curr_skb != NULL)
		cdc_ncm_tx_timeout_start(ctx);
	return NULL;
}
/**
 * cdc_ncm_fill_tx_frame - aggregate datagrams into one 16-bit NTB (legacy)
 * @pipe_data: USB pipe private data; ->sedata carries the NCM context
 * @skb:       new datagram to add, or NULL to flush the current frame
 *
 * Legacy variant that stages the NTH16 header, a single NDP16 and the
 * DPE array in the pre-allocated ctx->tx_ncm scratch structures, and
 * memcpy()s them into skb_out once the frame is complete.  @skb is
 * always consumed (copied, parked in ctx->tx_rem_skb, or dropped).
 *
 * Returns the finished NTB skb, or NULL while the frame is still
 * accumulating; in the latter case the TX timeout timer is restarted.
 */
static struct sk_buff *
cdc_ncm_fill_tx_frame(struct if_usb_devdata *pipe_data, struct sk_buff *skb)
{
	struct sk_buff *skb_out;
	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)pipe_data->sedata;
	u32 rem;
	u32 offset;
	u32 last_offset;
	u16 n = 0, index;
	u8 ready2send = 0;

	/* if there is a remaining skb, it gets priority */
	if (skb != NULL)
		swap(skb, ctx->tx_rem_skb);
	else
		ready2send = 1;

	/*
	 * +----------------+
	 * |    skb_out     |
	 * +----------------+
	 *           ^ offset        (next write position, aligned)
	 *        ^ last_offset      (end of valid data before alignment)
	 */

	/* check if we are resuming an OUT skb */
	if (ctx->tx_curr_skb != NULL) {
		/* pop variables */
		skb_out = ctx->tx_curr_skb;
		offset = ctx->tx_curr_offset;
		last_offset = ctx->tx_curr_last_offset;
		n = ctx->tx_curr_frame_num;

	} else {
		/* restore tx_max
		 * NOTE(review): presumably a tx_max_setup with zero low 12
		 * bits encodes a 512-byte reserve - confirm against where
		 * tx_max_setup is assigned.
		 */
		ctx->tx_max = (ctx->tx_max_setup & 0xFFF) ?
				ctx->tx_max_setup : ctx->tx_max_setup - 512;
retry_alloc:
		skb_out = alloc_skb((ctx->tx_max + 1), GFP_ATOMIC);
		if (skb_out == NULL) {
			/* shrink the request and retry while still large
			 * NOTE(review): if tx_max is unsigned and drops
			 * below 512, "tx_max / 2 - 256" wraps to a huge
			 * value and the >= check keeps retrying - verify
			 * tx_max's declared type and minimum value.
			 */
			ctx->tx_max = ctx->tx_max / 2 - 256;
			if (ctx->tx_max >= PAGE_SIZE - 512) {
				mif_err("re-try to alloc %d\n", ctx->tx_max);
				goto retry_alloc;
			}
			if (skb != NULL)
				dev_kfree_skb_any(skb);
			goto exit_no_skb;
		}

		/* make room for NTH and NDP */
		offset = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
					ctx->tx_ndp_modulus) +
					sizeof(struct usb_cdc_ncm_ndp16) +
					(ctx->tx_max_datagrams + 1) *
					sizeof(struct usb_cdc_ncm_dpe16);

		/* store last valid offset before alignment */
		last_offset = offset;
		/* align first Datagram offset correctly */
		offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
		/* zero buffer till the first IP datagram */
		cdc_ncm_zero_fill(skb_out->data, 0, offset, offset);
		n = 0;
		ctx->tx_curr_frame_num = 0;
	}

	for (; n < ctx->tx_max_datagrams; n++) {
		/* check if end of transmit buffer is reached */
		if (offset >= ctx->tx_max) {
			ready2send = 1;
			break;
		}

		/* compute maximum buffer size */
		rem = ctx->tx_max - offset;

		if (skb == NULL) {
			skb = ctx->tx_rem_skb;
			ctx->tx_rem_skb = NULL;

			/* check for end of skb */
			if (skb == NULL)
				break;
		}

		if (skb->len > rem) {
			if (n == 0) {
				/* won't fit, MTU problem? */
				dev_kfree_skb_any(skb);
				skb = NULL;
			} else {
				/* no room for skb - store for later */
				if (ctx->tx_rem_skb != NULL)
					dev_kfree_skb_any(ctx->tx_rem_skb);
				ctx->tx_rem_skb = skb;
				skb = NULL;
				ready2send = 1;
			}
			break;
		}

		/* copy payload and record it in the staged DPE table */
		memcpy(((u8 *)skb_out->data) + offset, skb->data, skb->len);

		ctx->tx_ncm.dpe16[n].wDatagramLength = cpu_to_le16(skb->len);
		ctx->tx_ncm.dpe16[n].wDatagramIndex = cpu_to_le16(offset);

		/* update offset */
		offset += skb->len;

		/* store last valid offset before alignment */
		last_offset = offset;

		/* align offset correctly */
		offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;

		/* zero padding */
		cdc_ncm_zero_fill(skb_out->data, last_offset, offset,
								ctx->tx_max);
		dev_kfree_skb_any(skb);
		skb = NULL;
	}

	/* free up any dangling skb */
	if (skb != NULL) {
		dev_kfree_skb_any(skb);
		skb = NULL;
	}

	ctx->tx_curr_frame_num = n;

	if (n == 0) {
		/* wait for more frames */
		/* push variables */
		ctx->tx_curr_skb = skb_out;
		ctx->tx_curr_offset = offset;
		ctx->tx_curr_last_offset = last_offset;
		goto exit_no_skb;

	} else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
		/* wait for more frames */
		/* push variables */
		ctx->tx_curr_skb = skb_out;
		ctx->tx_curr_offset = offset;
		ctx->tx_curr_last_offset = last_offset;
		/* set the pending count */
		if (n < CDC_NCM_RESTART_TIMER_DATAGRAM_CNT)
			ctx->tx_timer_pending = CDC_NCM_TIMER_PENDING_CNT;
		goto exit_no_skb;

	} else {
		/* frame goes out */
		/* variables will be reset at next call */
	}

	/* check for overflow */
	if (last_offset > ctx->tx_max)
		last_offset = ctx->tx_max;

	/* revert offset */
	offset = last_offset;

	/*
	 * If collected data size is less or equal CDC_NCM_MIN_TX_PKT bytes,
	 * we send buffers as it is. If we get more data, it would be more
	 * efficient for USB HS mobile device with DMA engine to receive a full
	 * size NTB, than canceling DMA transfer and receiving a short packet.
	 */
	if (offset > CDC_NCM_MIN_TX_PKT)
		offset = ctx->tx_max;

	/* final zero padding */
	cdc_ncm_zero_fill(skb_out->data, last_offset, offset, ctx->tx_max);

	/* store last offset */
	last_offset = offset;

	/* If the transfer would end exactly on a USB packet boundary,
	 * append one pad byte so it terminates with a short packet
	 * rather than a ZLP.
	 */
	if (((last_offset < ctx->tx_max) && ((last_offset %
		le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) ||
	    (((last_offset == ctx->tx_max) && ((ctx->tx_max %
		le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) &&
		(ctx->tx_max < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)))) {
		/* force short packet */
		*(((u8 *)skb_out->data) + last_offset) = 0;
		last_offset++;
	}

	/* zero the rest of the DPEs plus the last NULL entry */
	for (; n <= CDC_NCM_DPT_DATAGRAMS_MAX; n++) {
		ctx->tx_ncm.dpe16[n].wDatagramLength = 0;
		ctx->tx_ncm.dpe16[n].wDatagramIndex = 0;
	}

	/* fill out 16-bit NTB header */
	ctx->tx_ncm.nth16.dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
	ctx->tx_ncm.nth16.wHeaderLength =
					cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
	ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
	ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
	index = ALIGN(sizeof(struct usb_cdc_ncm_nth16), ctx->tx_ndp_modulus);
	ctx->tx_ncm.nth16.wNdpIndex = cpu_to_le16(index);

	memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
	ctx->tx_seq++;

	/* fill out 16-bit NDP table */
	ctx->tx_ncm.ndp16.dwSignature =
				cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN);
	rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
					sizeof(struct usb_cdc_ncm_dpe16));
	ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
	ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */

	memcpy(((u8 *)skb_out->data) + index,
						&(ctx->tx_ncm.ndp16),
						sizeof(ctx->tx_ncm.ndp16));

	memcpy(((u8 *)skb_out->data) + index + sizeof(ctx->tx_ncm.ndp16),
					&(ctx->tx_ncm.dpe16),
					(ctx->tx_curr_frame_num + 1) *
					sizeof(struct usb_cdc_ncm_dpe16));

	/* set frame length */
	skb_put(skb_out, last_offset);

	/* return skb */
	ctx->tx_curr_skb = NULL;
	pipe_data->iod->ndev->stats.tx_packets += ctx->tx_curr_frame_num;
	return skb_out;

exit_no_skb:
	/* Start timer, if there is a remaining skb */
	if (ctx->tx_curr_skb != NULL)
		cdc_ncm_tx_timeout_start(ctx);
	return NULL;
}