void ieee802154_xmit_worker(struct work_struct *work) { struct ieee802154_local *local = container_of(work, struct ieee802154_local, tx_work); struct sk_buff *skb = local->tx_skb; struct net_device *dev = skb->dev; int res; rtnl_lock(); /* check if ifdown occurred while schedule */ if (!netif_running(dev)) goto err_tx; res = drv_xmit_sync(local, skb); if (res) goto err_tx; ieee802154_xmit_complete(&local->hw, skb, false); dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; rtnl_unlock(); return; err_tx: /* Restart the netif queue on each sub_if_data object. */ ieee802154_wake_queue(&local->hw); rtnl_unlock(); kfree_skb(skb); netdev_dbg(dev, "transmission failed\n"); }
static void at86rf230_async_error_recover(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL, false); ieee802154_wake_queue(lp->hw); }
/*
 * Final step of TX error recovery: release the state-change context
 * (when it was dynamically allocated) and restart the netif queue.
 *
 * NOTE: lp is loaded from ctx *before* the conditional kfree(ctx) —
 * the statement order matters, as ctx must not be dereferenced after
 * it may have been freed.
 */
static void at86rf230_async_error_recover_complete(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; if (ctx->free) kfree(ctx); ieee802154_wake_queue(lp->hw); }
/*
 * wpan_phy resume callback.
 *
 * Restarts the hardware when at least one interface is up, then wakes
 * the transmit queue and clears the suspended flag.  A drv_start()
 * failure is propagated to the caller and the queue stays stopped.
 */
static int ieee802154_resume(struct wpan_phy *wpan_phy)
{
	struct ieee802154_local *local = wpan_phy_priv(wpan_phy);

	/* The hardware only needs restarting if it should be running,
	 * i.e. when at least one interface is open.
	 */
	if (local->open_count) {
		int err = drv_start(local);

		if (err)
			return err;
	}

	ieee802154_wake_queue(&local->hw);
	local->suspended = false;

	return 0;
}
/*
 * Handle a TX-done indication from the atusb firmware.
 *
 * When the reported sequence number matches the one we are waiting
 * for, the pending skb is completed back to the stack.  A mismatch is
 * treated as a stale/spurious completion: the queue is restarted and
 * the pending skb (if any) is dropped.
 */
static void atusb_tx_done(struct atusb *atusb, uint8_t seq)
{
	uint8_t expect = atusb->tx_ack_seq;

	dev_dbg(&atusb->usb_dev->dev, "atusb_tx_done (0x%02x/0x%02x)\n",
		seq, expect);

	if (seq == expect) {
		/* TODO check for ifs handling in firmware */
		ieee802154_xmit_complete(atusb->hw, atusb->tx_skb, false);
		return;
	}

	/* TODO Stale completion: seen when atusb raises a tx-complete
	 * irq before probing finishes — a firmware issue.  Unlikely now,
	 * but it can still happen with tx_skb == NULL, hence the guard.
	 */
	ieee802154_wake_queue(atusb->hw);
	if (atusb->tx_skb)
		dev_kfree_skb_irq(atusb->tx_skb);
}
static netdev_tx_t ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) { struct net_device *dev = skb->dev; int ret; if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) { u16 crc = crc_ccitt(0, skb->data, skb->len); put_unaligned_le16(crc, skb_put(skb, 2)); } if (skb_cow_head(skb, local->hw.extra_tx_headroom)) goto err_tx; /* Stop the netif queue on each sub_if_data object. */ ieee802154_stop_queue(&local->hw); /* async is priority, otherwise sync is fallback */ if (local->ops->xmit_async) { ret = drv_xmit_async(local, skb); if (ret) { ieee802154_wake_queue(&local->hw); goto err_tx; } dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; } else { local->tx_skb = skb; queue_work(local->workqueue, &local->tx_work); } return NETDEV_TX_OK; err_tx: kfree_skb(skb); return NETDEV_TX_OK; }