/*
 * Hard-start-xmit handler: push one skb into the SMD channel.
 *
 * With fcENABLE_FLOW_CTRL the write result and pre-write free space are
 * captured so the driver can throttle TX when the FIFO fills; without it
 * a short write simply drops the packet.  Note the if/else below is
 * stitched together across the #if/#else/#endif arms — both preprocessor
 * configurations produce a complete if/else statement.
 *
 * The skb is always consumed (dev_kfree_skb_irq) whether the write
 * succeeded or not, and 0 is returned unconditionally.
 */
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	smd_channel_t *ch = p->ch;
#if fcENABLE_FLOW_CTRL
	int res;
	/* Free space sampled BEFORE the write; used for the near-full check. */
	int iAvail = smd_write_avail(ch);
#endif

#if fcENABLE_FLOW_CTRL
	/* Short write => FIFO full: drop the packet and pause the TX queue. */
	if ((res = smd_write_atomic(ch, skb->data, skb->len)) != skb->len) {
		pr_err("rmnet fifo full, dropping packet: %d (%d,%d), fifo size = %d\n", res, skb->len, iAvail, smd_total_fifo_size(ch));
		rmnet_Throttle(dev);
#else
	/* No flow control configured: a short write just drops the packet. */
	if (smd_write_atomic(ch, skb->data, skb->len) != skb->len) {
		pr_err("rmnet fifo full, dropping packet, fifo size = %d\n", smd_total_fifo_size(ch));
#endif
	} else {
#if fcENABLE_FLOW_CTRL
		/*
		 * Write succeeded but pre-write headroom was already below the
		 * threshold: throttle proactively before the FIFO overflows.
		 */
		if (iAvail < cTHR_RMNET_FIFO) {
			pr_devel(LOG_TAG1 "rmnet fifo almost full: %d (%d,%d), tx paused\n", res, skb->len, iAvail);
			rmnet_Throttle(dev);
		}
#endif
		/* Only account packets count_this_packet() deems countable. */
		if (count_this_packet(skb->data, skb->len)) {
			p->stats.tx_packets++;
			p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
			p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
		}
	}
	/* skb is consumed on both the success and the drop path. */
	dev_kfree_skb_irq(skb);
	return 0;
}

/* Return the driver-private statistics block for this device. */
static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	return &p->stats;
}

/* Multicast list changes are intentionally ignored by this driver. */
static void rmnet_set_multicast_list(struct net_device *dev)
{
}

/*
 * TX watchdog timeout: log the event and re-check the SMD FIFO so a
 * throttled queue can be resumed if space has freed up in the meantime.
 */
static void rmnet_tx_timeout(struct net_device *dev)
{
	pr_info("rmnet_tx_timeout()\n");
	rmnet_check_fifo(dev);
}
/*
 * If TX was throttled because the SMD FIFO filled up, resume the queue
 * once more than half of the FIFO has drained.  Compiled out entirely
 * when fcENABLE_FLOW_CTRL is disabled.
 */
static void rmnet_check_fifo(struct net_device *dev)
{
#if fcENABLE_FLOW_CTRL
	struct rmnet_private *priv;
	int free_bytes;

	/* Nothing to do unless a previous write throttled the queue. */
	if (!bRmnetFifoFull)
		return;

	priv = netdev_priv(dev);
	free_bytes = smd_write_avail(priv->ch);

	/* Wait until the FIFO is more than half empty before resuming. */
	if (free_bytes <= (smd_total_fifo_size(priv->ch) / 2))
		return;

	pr_devel(LOG_TAG1 "%s@%d: tx resumed\n", __func__, __LINE__);
	if (netif_carrier_ok(dev))
		netif_wake_queue(dev);
	else
		pr_err(LOG_TAG1 "%s@%d: no netif_carrier_ok\n", __func__, __LINE__);
	bRmnetFifoFull = 0;
#endif
}