/** * dev_mc_unsync - Remove synchronized addresses from the destination * device * @to: destination device * @from: source device * * Remove all addresses that were added to the destination device by * dev_mc_sync(). This function is intended to be called from the * dev->stop function of layered software devices. */ void dev_mc_unsync(struct net_device *to, struct net_device *from) { struct dev_addr_list *da, *next, *da_to; netif_tx_lock_bh(from); netif_tx_lock_bh(to); da = from->mc_list; while (da != NULL) { bool synced = false; next = da->next; da_to = to->mc_list; /* 2.6.22 does not have da->da_synced so lets take the long route */ while (da_to != NULL) { if (memcmp(da_to->da_addr, da->da_addr, da_to->da_addrlen) == 0 && da->da_addrlen == da_to->da_addrlen) synced = true; break; } if (!synced) { da = next; continue; } __dev_addr_delete(&to->mc_list, &to->mc_count, da->da_addr, da->da_addrlen, 0); __dev_addr_delete(&from->mc_list, &from->mc_count, da->da_addr, da->da_addrlen, 0); da = next; } __dev_set_rx_mode(to); netif_tx_unlock_bh(to); netif_tx_unlock_bh(from); }
/** * dev_mc_sync - Synchronize device's multicast list to another device * @to: destination device * @from: source device * * Add newly added addresses to the destination device and release * addresses that have no users left. The source device must be * locked by netif_tx_lock_bh. * * This function is intended to be called from the dev->set_multicast_list * function of layered software devices. */ int dev_mc_sync(struct net_device *to, struct net_device *from) { struct dev_addr_list *da, *next; int err = 0; netif_tx_lock_bh(to); da = from->mc_list; while (da != NULL) { next = da->next; if (!da->da_synced) { err = __dev_addr_add(&to->mc_list, &to->mc_count, da->da_addr, da->da_addrlen, 0); if (err < 0) break; da->da_synced = 1; da->da_users++; } else if (da->da_users == 1) { __dev_addr_delete(&to->mc_list, &to->mc_count, da->da_addr, da->da_addrlen, 0); __dev_addr_delete(&from->mc_list, &from->mc_count, da->da_addr, da->da_addrlen, 0); } da = next; } if (!err) __dev_set_rx_mode(to); netif_tx_unlock_bh(to); return err; }
/*
 * unlink_clip_vcc - detach a CLIP VCC from its ATMARP entry.
 *
 * Removes @clip_vcc from its entry's vccs list under the device TX lock
 * (which blocks clip_start_xmit()).  If this was the entry's last VCC,
 * the neighbour is pushed towards re-resolution or expiration.
 */
static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
{
	struct atmarp_entry *entry = clip_vcc->entry;
	struct clip_vcc **walk;

	if (!entry) {
		printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
		return;
	}
	netif_tx_lock_bh(entry->neigh->dev);	/* block clip_start_xmit() */
	entry->neigh->used = jiffies;
	/* Walk the singly-linked list via pointer-to-pointer so the matching
	 * link can be unspliced in place with a single store. */
	for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
		if (*walk == clip_vcc) {
			int error;

			*walk = clip_vcc->next;	/* atomic */
			clip_vcc->entry = NULL;
			if (clip_vcc->xoff)
				netif_wake_queue(entry->neigh->dev);
			/* other VCCs still attached: entry stays alive */
			if (entry->vccs)
				goto out;
			entry->expires = jiffies - 1;
			/* force resolution or expiration */
			error = neigh_update(entry->neigh, NULL, NUD_NONE,
					     NEIGH_UPDATE_F_ADMIN);
			if (error)
				printk(KERN_CRIT "unlink_clip_vcc: "
				       "neigh_update failed with %d\n", error);
			goto out;
		}
	/* reaching here means @clip_vcc was not on its entry's list */
	printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
	       "0x%p)\n", entry, clip_vcc);
out:
	netif_tx_unlock_bh(entry->neigh->dev);
}
/*
 * dev_watchdog_down - stop the TX watchdog timer for @dev.
 *
 * Runs under the device TX lock so it cannot race with the transmit
 * path re-arming the watchdog.  A pending timer holds a reference on
 * the device; drop it if we actually deactivated the timer.
 */
static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	/* del_timer() returns nonzero iff the timer was still pending */
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}
/*
 * dev_mc_delete - remove one use of a multicast address from @dev.
 * @dev: device to update
 * @addr: hardware address to drop
 * @alen: length of @addr in bytes
 * @glbl: nonzero to drop the "global" use of the address
 *
 * Returns 0 on success or -ENOENT if the address (or its global use)
 * was not found.  Reloads the hardware filter only when an entry is
 * actually removed from the list.
 */
int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
{
	int err = 0;
	struct dev_mc_list *dmi, **dmip;

	netif_tx_lock_bh(dev);

	for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) {
		/*
		 * Find the entry we want to delete. The device could
		 * have variable length entries so check these too.
		 */
		if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
		    alen == dmi->dmi_addrlen) {
			if (glbl) {
				int old_glbl = dmi->dmi_gusers;
				dmi->dmi_gusers = 0;
				/* was not in global use: report -ENOENT */
				if (old_glbl == 0)
					break;
			}
			/* other users remain: keep the entry, return 0 */
			if (--dmi->dmi_users)
				goto done;

			/*
			 * Last user. So delete the entry.
			 */
			*dmip = dmi->next;
			dev->mc_count--;
			kfree(dmi);
			/*
			 * We have altered the list, so the card
			 * loaded filter is now wrong. Fix it
			 */
			__dev_mc_upload(dev);
			netif_tx_unlock_bh(dev);
			return 0;
		}
	}
	err = -ENOENT;
done:
	netif_tx_unlock_bh(dev);
	return err;
}
/*
 * efx_end_loopback - collect and check the results of one loopback run.
 * @tx_queue: the TX queue the test packets were sent on
 * @lb_tests: accumulator for the per-queue test statistics
 *
 * Returns 0 on success or -ETIMEDOUT if fewer TX completions or RX
 * packets were seen than were sent.  NOTE(review): an skb that is no
 * longer shared is counted as completed - presumably the TX completion
 * path dropped its reference; confirm against the completion handler.
 */
static int efx_end_loopback(struct efx_tx_queue *tx_queue,
			    struct efx_loopback_self_tests *lb_tests)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct sk_buff *skb;
	int tx_done = 0, rx_good, rx_bad;
	int i, rc = 0;

	if (efx_dev_registered(efx))
		netif_tx_lock_bh(efx->net_dev);

	/* Count the number of tx completions, and decrement the refcnt. Any
	 * skbs not already completed will be free'd when the queue is flushed */
	for (i=0; i < state->packet_count; i++) {
		skb = state->skbs[i];
		if (skb && !skb_shared(skb))
			++tx_done;
		dev_kfree_skb_any(skb);
	}

	if (efx_dev_registered(efx))
		netif_tx_unlock_bh(efx->net_dev);

	/* Check TX completion and received packet counts */
	rx_good = atomic_read(&state->rx_good);
	rx_bad = atomic_read(&state->rx_bad);
	if (tx_done != state->packet_count) {
		/* Don't free the skbs; they will be picked up on TX
		 * overflow or channel teardown.
		 */
		EFX_ERR(efx, "TX queue %d saw only %d out of an expected %d "
			"TX completion events in %s loopback test\n",
			tx_queue->queue, tx_done, state->packet_count,
			LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Allow to fall through so we see the RX errors as well */
	}

	/* We may always be up to a flush away from our desired packet total */
	if (rx_good != state->packet_count) {
		EFX_LOG(efx, "TX queue %d saw only %d out of an expected %d "
			"received packets in %s loopback test\n",
			tx_queue->queue, rx_good, state->packet_count,
			LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Fall through */
	}

	/* Update loopback test structure */
	lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
	lb_tests->tx_done[tx_queue->queue] += tx_done;
	lb_tests->rx_good += rx_good;
	lb_tests->rx_bad += rx_bad;

	return rc;
}
/*
 * dev_mc_add - add a use of a multicast address to @dev.
 * @dev: device to update
 * @addr: hardware address to add
 * @alen: length of @addr in bytes
 * @glbl: nonzero to mark the address as globally used
 *
 * Returns 0 on success or -ENOMEM if a new entry could not be
 * allocated.  NOTE(review): @alen is not validated against the size of
 * dmi_addr before the memcpy() below - confirm callers pass sane
 * lengths.
 */
int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
{
	int err = 0;
	struct dev_mc_list *dmi, *dmi1;

	/* preallocate before taking the lock; we cannot sleep under it */
	dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC);

	netif_tx_lock_bh(dev);
	for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
		if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
		    dmi->dmi_addrlen == alen) {
			if (glbl) {
				int old_glbl = dmi->dmi_gusers;
				dmi->dmi_gusers = 1;
				/* already globally used: nothing to count */
				if (old_glbl)
					goto done;
			}
			dmi->dmi_users++;
			goto done;
		}
	}

	/* address not found: link in the preallocated entry, if we got one */
	if ((dmi = dmi1) == NULL) {
		netif_tx_unlock_bh(dev);
		return -ENOMEM;
	}
	memcpy(dmi->dmi_addr, addr, alen);
	dmi->dmi_addrlen = alen;
	dmi->next = dev->mc_list;
	dmi->dmi_users = 1;
	dmi->dmi_gusers = glbl ? 1 : 0;
	dev->mc_list = dmi;
	dev->mc_count++;

	/* the list changed, so the hardware filter is stale; reload it */
	__dev_mc_upload(dev);

	netif_tx_unlock_bh(dev);
	return 0;

done:
	netif_tx_unlock_bh(dev);
	/*
	 * The entry already existed: release the unused preallocation
	 * (kfree(NULL) is a no-op when the kmalloc above failed).
	 */
	kfree(dmi1);
	return err;
}
/*
 * dev_mc_add - add a multicast address to the device list.
 * @dev: device to update
 * @addr: hardware address to add
 * @alen: address length in bytes
 * @glbl: nonzero to add the address as a "global" user
 *
 * Locked wrapper around __dev_addr_add(); on success the receive mode
 * is refreshed via __dev_set_rx_mode().
 */
int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
{
	int rc;

	netif_tx_lock_bh(dev);
	rc = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
	if (rc == 0)
		__dev_set_rx_mode(dev);
	netif_tx_unlock_bh(dev);
	return rc;
}
/*
 * qcaspi_transmit - push queued skbs from the TX ring to the SPI chip.
 * @qca: driver state
 *
 * Sends packets from the ring head while the chip's write buffer has
 * room for the next frame, updating device stats as it goes.  Returns
 * 0 on success (including "buffer full, try later") or -1 on an SPI
 * write error.
 */
static int qcaspi_transmit(struct qcaspi *qca)
{
	struct net_device_stats *n_stats = &qca->net_dev->stats;
	u16 available = 0;
	u32 pkt_len;
	u16 new_head;
	u16 packets = 0;

	/* nothing queued at the ring head: nothing to do */
	if (qca->txr.skb[qca->txr.head] == NULL)
		return 0;

	/* read how much write-buffer space the chip currently has */
	qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA, &available);

	while (qca->txr.skb[qca->txr.head]) {
		pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN;

		if (available < pkt_len) {
			/* count a buffer miss only if we sent nothing */
			if (packets == 0)
				qca->stats.write_buf_miss++;
			break;
		}

		if (qcaspi_tx_frame(qca, qca->txr.skb[qca->txr.head]) == -1) {
			qca->stats.write_err++;
			return -1;
		}

		packets++;
		n_stats->tx_packets++;
		n_stats->tx_bytes += qca->txr.skb[qca->txr.head]->len;
		available -= pkt_len;

		/* remove the skb from the queue */
		/* XXX After inconsistent lock states netif_tx_lock()
		 * has been replaced by netif_tx_lock_bh() and so on.
		 */
		netif_tx_lock_bh(qca->net_dev);
		dev_kfree_skb(qca->txr.skb[qca->txr.head]);
		qca->txr.skb[qca->txr.head] = NULL;
		qca->txr.size -= pkt_len;
		/* advance the head with wrap-around */
		new_head = qca->txr.head + 1;
		if (new_head >= qca->txr.count)
			new_head = 0;
		qca->txr.head = new_head;
		/* we freed a ring slot, so the stack may transmit again */
		if (netif_queue_stopped(qca->net_dev))
			netif_wake_queue(qca->net_dev);
		netif_tx_unlock_bh(qca->net_dev);
	}

	return 0;
}
/*
 * efx_begin_loopback - queue the loopback test packets on a TX queue.
 * @tx_queue: queue to transmit on
 *
 * Allocates and transmits state->packet_count copies of the prepared
 * payload, keeping an extra reference on each skb so efx_end_loopback()
 * can tell which ones completed.  Returns 0 on success, -ENOMEM on
 * allocation failure or -EPIPE if the queue refused a packet.
 */
static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *payload;
	struct sk_buff *skb;
	int i;
	netdev_tx_t rc;

	/* Transmit N copies of buffer */
	for (i = 0; i < state->packet_count; i++) {
		/* Allocate an skb, holding an extra reference for
		 * transmit completion counting */
		skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		state->skbs[i] = skb;
		skb_get(skb);

		/* Copy the payload in, incrementing the source address to
		 * exercise the rss vectors */
		payload = ((struct efx_loopback_payload *)
			   skb_put(skb, sizeof(state->payload)));
		memcpy(payload, &state->payload, sizeof(state->payload));
		payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));

		/* Ensure everything we've written is visible to the
		 * interrupt handler. */
		smp_wmb();

		if (efx_dev_registered(efx))
			netif_tx_lock_bh(efx->net_dev);
		rc = efx_enqueue_skb(tx_queue, skb);
		if (efx_dev_registered(efx))
			netif_tx_unlock_bh(efx->net_dev);

		if (rc != NETDEV_TX_OK) {
			EFX_ERR(efx, "TX queue %d could not transmit packet %d "
				"of %d in %s loopback test\n", tx_queue->queue,
				i + 1, state->packet_count,
				LOOPBACK_MODE(efx));

			/* Defer cleaning up the other skbs for the caller */
			kfree_skb(skb);
			return -EPIPE;
		}
	}

	return 0;
}
/** * dev_mc_unsync - Remove synchronized addresses from the destination * device * @to: destination device * @from: source device * * Remove all addresses that were added to the destination device by * dev_mc_sync(). This function is intended to be called from the * dev->stop function of layered software devices. */ void dev_mc_unsync(struct net_device *to, struct net_device *from) { struct dev_addr_list *da, *next; netif_tx_lock_bh(from); netif_tx_lock_bh(to); da = from->mc_list; while (da != NULL) { next = da->next; if (!da->da_synced) continue; __dev_addr_delete(&to->mc_list, &to->mc_count, da->da_addr, da->da_addrlen, 0); da->da_synced = 0; __dev_addr_delete(&from->mc_list, &from->mc_count, da->da_addr, da->da_addrlen, 0); da = next; } __dev_set_rx_mode(to); netif_tx_unlock_bh(to); netif_tx_unlock_bh(from); }
/*
 * dev_mc_discard - throw away the whole multicast list of @dev.
 *
 * Frees every entry under the device TX lock and resets the count.
 * Warns if an entry still had non-global users, since that indicates a
 * reference-count leak somewhere.
 */
void dev_mc_discard(struct net_device *dev)
{
	struct dev_mc_list *victim;

	netif_tx_lock_bh(dev);

	while ((victim = dev->mc_list) != NULL) {
		dev->mc_list = victim->next;
		if (victim->dmi_users > victim->dmi_gusers)
			printk("dev_mc_discard: multicast leakage! dmi_users=%d\n", victim->dmi_users);
		kfree(victim);
	}
	dev->mc_count = 0;

	netif_tx_unlock_bh(dev);
}
/*
 * cdc_ncm_txpath_bh - TX coalescing tasklet.
 * @param: pointer to the struct cdc_ncm_ctx, cast to unsigned long.
 *
 * If the coalescing timer is still pending, decrement the pending count
 * and re-arm the timeout; otherwise kick the transmit path so any
 * buffered NCM frame gets flushed.  NOTE(review): ctx->mtx is released
 * before the netdev TX lock is taken - presumably to avoid nesting the
 * two locks; confirm the ordering against the transmit path.
 */
static void cdc_ncm_txpath_bh(unsigned long param)
{
	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)param;

	spin_lock_bh(&ctx->mtx);
	if (ctx->tx_timer_pending != 0) {
		ctx->tx_timer_pending--;
		cdc_ncm_tx_timeout_start(ctx);
		spin_unlock_bh(&ctx->mtx);
	} else if (ctx->netdev != NULL) {
		spin_unlock_bh(&ctx->mtx);
		netif_tx_lock_bh(ctx->netdev);
		/* NULL skb: flush whatever is buffered, send nothing new */
		usbnet_start_xmit(NULL, ctx->netdev);
		netif_tx_unlock_bh(ctx->netdev);
	} else {
		spin_unlock_bh(&ctx->mtx);
	}
}
/*
 * dev_mc_delete - remove a multicast address from the device list.
 * @dev: device to update
 * @addr: hardware address to remove
 * @alen: address length in bytes
 * @glbl: nonzero to drop the "global" use of the address
 *
 * Locked wrapper around __dev_addr_delete(); refreshes the receive
 * mode after a successful removal.
 */
int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
{
	int rc;

	netif_tx_lock_bh(dev);
	rc = __dev_addr_delete(&dev->mc_list, &dev->mc_count,
			       addr, alen, glbl);
	if (rc == 0) {
		/*
		 * We have altered the list, so the card
		 * loaded filter is now wrong. Fix it
		 */
		__dev_set_rx_mode(dev);
	}
	netif_tx_unlock_bh(dev);
	return rc;
}
/*
 * gfar_set_tx_csum - enable or disable TX IP checksum offload.
 * @dev: the gianfar network device
 * @data: nonzero to enable offload, zero to disable
 *
 * Returns -EOPNOTSUPP when the hardware lacks checksum support,
 * otherwise 0.
 */
static int gfar_set_tx_csum(struct net_device *dev, uint32_t data)
{
	struct gfar_private *gfar = netdev_priv(dev);

	if (!(gfar->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
		return -EOPNOTSUPP;

	/* Flip the feature bit under the TX lock so the transmit path
	 * always sees a consistent feature set. */
	netif_tx_lock_bh(dev);
	if (data)
		dev->features |= NETIF_F_IP_CSUM;
	else
		dev->features &= ~NETIF_F_IP_CSUM;
	netif_tx_unlock_bh(dev);

	return 0;
}
/*
 * dev_mc_seq_show - emit one device's multicast list to a seq_file.
 *
 * Prints, for each list entry: ifindex, device name, user counts and
 * the address as hex octets.  Holds the TX lock while walking the list.
 */
static int dev_mc_seq_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = v;
	struct dev_mc_list *mc;
	int octet;

	netif_tx_lock_bh(dev);
	for (mc = dev->mc_list; mc; mc = mc->next) {
		seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex,
			   dev->name, mc->dmi_users, mc->dmi_gusers);

		for (octet = 0; octet < mc->dmi_addrlen; octet++)
			seq_printf(seq, "%02x", mc->dmi_addr[octet]);

		seq_putc(seq, '\n');
	}
	netif_tx_unlock_bh(dev);
	return 0;
}
/*
 * qcaspi_flush_tx_ring - drop every queued skb from the TX ring.
 *
 * Frees all occupied ring slots (counting each as a dropped packet)
 * and resets the ring indices, all under the device TX lock.
 */
static void qcaspi_flush_tx_ring(struct qcaspi *qca)
{
	int slot;

	/* XXX After inconsistent lock states netif_tx_lock()
	 * has been replaced by netif_tx_lock_bh() and so on.
	 */
	netif_tx_lock_bh(qca->net_dev);

	for (slot = 0; slot < TX_RING_MAX_LEN; slot++) {
		struct sk_buff *skb = qca->txr.skb[slot];

		if (!skb)
			continue;

		dev_kfree_skb(skb);
		qca->txr.skb[slot] = NULL;
		qca->net_dev->stats.tx_dropped++;
	}

	/* ring is empty now; reset the bookkeeping */
	qca->txr.tail = 0;
	qca->txr.head = 0;
	qca->txr.size = 0;

	netif_tx_unlock_bh(qca->net_dev);
}
/*
 * cdc_ncm_txpath_bh - TX coalescing tasklet (modem pipe variant).
 * @param: pointer to the struct if_usb_devdata, cast to unsigned long.
 *
 * If the coalescing timer is still pending, decrement the pending count
 * and re-arm the timeout; otherwise kick the USB transmit path so any
 * buffered frame gets flushed.  NOTE(review): ctx->mtx is released
 * before the netdev TX lock is taken - presumably to avoid nesting the
 * two locks; confirm the ordering against the transmit path.
 */
static void cdc_ncm_txpath_bh(unsigned long param)
{
	struct if_usb_devdata *pipe_data = (struct if_usb_devdata *)param;
	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)pipe_data->sedata;

	/* the pipe may not have an NCM context attached yet */
	if (!ctx)
		return;

	spin_lock_bh(&ctx->mtx);
	if (ctx->tx_timer_pending != 0) {
		ctx->tx_timer_pending--;
		cdc_ncm_tx_timeout_start(ctx);
		spin_unlock_bh(&ctx->mtx);
	} else if (pipe_data->iod->ndev != NULL) {
		spin_unlock_bh(&ctx->mtx);
		netif_tx_lock_bh(pipe_data->iod->ndev);
		/* NULL skb: flush whatever is buffered, send nothing new */
		usb_tx_skb(pipe_data, NULL);
		netif_tx_unlock_bh(pipe_data->iod->ndev);
	} else {
		spin_unlock_bh(&ctx->mtx);
	}
}
/** * dev_mc_sync - Synchronize device's multicast list to another device * @to: destination device * @from: source device * * Add newly added addresses to the destination device and release * addresses that have no users left. The source device must be * locked by netif_tx_lock_bh. * * This function is intended to be called from the dev->set_multicast_list * function of layered software devices. */ int dev_mc_sync(struct net_device *to, struct net_device *from) { struct dev_addr_list *da, *next, *da_to; int err = 0; netif_tx_lock_bh(to); da = from->mc_list; while (da != NULL) { int synced = 0; next = da->next; da_to = to->mc_list; /* 2.6.22 does not have da->da_synced so lets take the long route */ while (da_to != NULL) { if (memcmp(da_to->da_addr, da->da_addr, da_to->da_addrlen) == 0 && da->da_addrlen == da_to->da_addrlen) synced = 1; break; } if (!synced) { err = __dev_addr_add(&to->mc_list, &to->mc_count, da->da_addr, da->da_addrlen, 0); if (err < 0) break; da->da_users++; } else if (da->da_users == 1) { __dev_addr_delete(&to->mc_list, &to->mc_count, da->da_addr, da->da_addrlen, 0); __dev_addr_delete(&from->mc_list, &from->mc_count, da->da_addr, da->da_addrlen, 0); } da = next; } if (!err) __dev_set_rx_mode(to); netif_tx_unlock_bh(to); return err; }
/*
 * dev_mc_upload - push the current multicast list to the hardware.
 *
 * Thin locked wrapper around __dev_mc_upload(); holds the device TX
 * lock so the upload cannot race with concurrent list updates.
 */
void dev_mc_upload(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	__dev_mc_upload(dev);
	netif_tx_unlock_bh(dev);
}