void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
{
again:
        switch (e->state) {
        case L2T_STATE_STALE:   /* entry is stale, kick off revalidation */
                neigh_event_send(e->neigh, NULL);
                spin_lock_bh(&e->lock);
                if (e->state == L2T_STATE_STALE) {
                        e->state = L2T_STATE_VALID;
                }
                spin_unlock_bh(&e->lock);
                return;
        case L2T_STATE_VALID:   /* fast-path, send the packet on */
                return;
        case L2T_STATE_RESOLVING:
                spin_lock_bh(&e->lock);
                if (e->state != L2T_STATE_RESOLVING) {
                        /* ARP already completed */
                        spin_unlock_bh(&e->lock);
                        goto again;
                }
                spin_unlock_bh(&e->lock);

                /*
                 * Only the first packet added to the arpq should kick off
                 * resolution.  However, because the alloc_skb below can fail,
                 * we allow each packet added to the arpq to retry resolution
                 * as a way of recovering from transient memory exhaustion.
                 * A better way would be to use a work request to retry L2T
                 * entries when there's no memory.
                 */
                neigh_event_send(e->neigh, NULL);
        }
        return;
}
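The function above relies on the return-value contract of neigh_event_send(): it returns 0 when the neighbour entry is already usable (passing a NULL skb, as here, merely kicks off revalidation without queuing anything), and nonzero when the skb was consumed, either queued pending resolution or dropped. Below is a minimal sketch of that contract; my_xmit() is a hypothetical helper written only for illustration, not part of any driver.

/*
 * Minimal sketch of the neigh_event_send() contract. my_xmit() is a
 * hypothetical transmit helper, written only to illustrate the call.
 */
#include <linux/skbuff.h>
#include <net/neighbour.h>

static int my_xmit(struct neighbour *n, struct sk_buff *skb)
{
        if (neigh_event_send(n, skb) == 0) {
                /* Entry is valid: n->ha holds a usable hardware address
                 * and skb is still ours to transmit. */
                return 0;
        }
        /* skb was queued on the neighbour's arp_queue or freed;
         * it must not be touched again by the caller. */
        return -EAGAIN;
}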
static int __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
                          struct net_device *dev)
{
        struct teql_sched_data *q = (void *)dev->qdisc->data;
        struct neighbour *mn = skb->dst->neighbour;
        struct neighbour *n = q->ncache;

        if (mn->tbl == NULL)
                return -EINVAL;
        if (n && n->tbl == mn->tbl &&
            memcmp(n->primary_key, mn->primary_key, mn->tbl->key_len) == 0) {
                atomic_inc(&n->refcnt);
        } else {
                n = __neigh_lookup_errno(mn->tbl, mn->primary_key, dev);
                if (IS_ERR(n))
                        return PTR_ERR(n);
        }
        if (neigh_event_send(n, skb_res) == 0) {
                int err;

                read_lock(&n->lock);
                err = dev->hard_header(skb, dev, ntohs(skb->protocol),
                                       n->ha, NULL, skb->len);
                read_unlock(&n->lock);
                if (err < 0) {
                        neigh_release(n);
                        return -EINVAL;
                }
                teql_neigh_release(xchg(&q->ncache, n));
                return 0;
        }
        neigh_release(n);
        return (skb_res == NULL) ? -EAGAIN : 1;
}
int arp_find(unsigned char *haddr, struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        __be32 paddr;
        struct neighbour *n;

        if (!skb->dst) {
                printk(KERN_DEBUG "arp_find is called with dst==NULL\n");
                kfree_skb(skb);
                return 1;
        }

        paddr = skb->rtable->rt_gateway;

        if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr,
                               paddr, dev))
                return 0;

        n = __neigh_lookup(&arp_tbl, &paddr, dev, 1);

        if (n) {
                n->used = jiffies;
                if (n->nud_state & NUD_VALID ||
                    neigh_event_send(n, skb) == 0) {
                        read_lock_bh(&n->lock);
                        memcpy(haddr, n->ha, dev->addr_len);
                        read_unlock_bh(&n->lock);
                        neigh_release(n);
                        return 0;
                }
                neigh_release(n);
        } else
                kfree_skb(skb);
        return 1;
}
int arp_find(unsigned char *haddr, struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        __be32 paddr;
        struct neighbour *n;

        if (!skb_dst(skb)) {
                pr_debug("arp_find is called with dst==NULL\n");
                kfree_skb(skb);
                return 1;
        }

        paddr = rt_nexthop(skb_rtable(skb), ip_hdr(skb)->daddr);

        if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr,
                               paddr, dev))
                return 0;

        n = __neigh_lookup(&arp_tbl, &paddr, dev, 1);

        if (n) {
                n->used = jiffies;
                if (n->nud_state & NUD_VALID ||
                    neigh_event_send(n, skb) == 0) {
                        neigh_ha_snapshot(haddr, n, dev);
                        neigh_release(n);
                        return 0;
                }
                neigh_release(n);
        } else
                kfree_skb(skb);
        return 1;
}
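The two arp_find() versions above show the same logic across kernel generations: the direct skb->dst and skb->rtable accesses became the skb_dst()/skb_rtable() accessors, the gateway read became rt_nexthop(), and the explicit read_lock_bh()/memcpy() of n->ha was replaced by neigh_ha_snapshot(), which copies the hardware address consistently. The sketch below isolates that lookup/resolve/copy pattern; get_next_hop_mac() is a hypothetical helper, assuming a kernel that provides neigh_ha_snapshot().

/*
 * Hypothetical helper sketching the lookup/resolve/copy pattern used by
 * arp_find() above; assumes a kernel with neigh_ha_snapshot().
 */
#include <net/arp.h>
#include <net/neighbour.h>

static int get_next_hop_mac(unsigned char *haddr, __be32 paddr,
                            struct net_device *dev, struct sk_buff *skb)
{
        struct neighbour *n = __neigh_lookup(&arp_tbl, &paddr, dev, 1);

        if (!n)
                return -ENOMEM;

        if (n->nud_state & NUD_VALID || neigh_event_send(n, skb) == 0) {
                neigh_ha_snapshot(haddr, n, dev); /* consistent copy of n->ha */
                neigh_release(n);
                return 0;
        }
        /* Resolution is pending and skb (if any) was consumed. */
        neigh_release(n);
        return -EAGAIN;
}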
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
                     struct l2t_entry *e)
{
again:
        switch (e->state) {
        case L2T_STATE_STALE:   /* entry is stale, kick off revalidation */
                neigh_event_send(e->neigh, NULL);
                spin_lock_bh(&e->lock);
                if (e->state == L2T_STATE_STALE)
                        e->state = L2T_STATE_VALID;
                spin_unlock_bh(&e->lock);
                /* fall through: the entry is valid again, send the packet */
        case L2T_STATE_VALID:   /* fast-path, send the packet on */
                return cxgb3_ofld_send(dev, skb);
        case L2T_STATE_RESOLVING:
                spin_lock_bh(&e->lock);
                if (e->state != L2T_STATE_RESOLVING) {
                        /* ARP already completed */
                        spin_unlock_bh(&e->lock);
                        goto again;
                }
                arpq_enqueue(e, skb);
                spin_unlock_bh(&e->lock);

                /*
                 * Only the first packet added to the arpq should kick off
                 * resolution.  However, because the alloc_skb below can fail,
                 * we allow each packet added to the arpq to retry resolution
                 * as a way of recovering from transient memory exhaustion.
                 * A better way would be to use a work request to retry L2T
                 * entries when there's no memory.
                 */
                if (!neigh_event_send(e->neigh, NULL)) {
                        skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
                                        GFP_ATOMIC);
                        if (!skb)
                                break;

                        spin_lock_bh(&e->lock);
                        if (e->arpq_head)
                                setup_l2e_send_pending(dev, skb, e);
                        else    /* we lost the race */
                                __kfree_skb(skb);
                        spin_unlock_bh(&e->lock);
                }
        }
        return 0;
}
static int __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
                          struct net_device *dev, struct netdev_queue *txq,
                          struct neighbour *mn)
{
        struct teql_sched_data *q = qdisc_priv(txq->qdisc);
        struct neighbour *n = q->ncache;

        if (mn->tbl == NULL)
                return -EINVAL;
        if (n && n->tbl == mn->tbl &&
            memcmp(n->primary_key, mn->primary_key, mn->tbl->key_len) == 0) {
                atomic_inc(&n->refcnt);
        } else {
                n = __neigh_lookup_errno(mn->tbl, mn->primary_key, dev);
                if (IS_ERR(n))
                        return PTR_ERR(n);
        }
        if (neigh_event_send(n, skb_res) == 0) {
                int err;
                char haddr[MAX_ADDR_LEN];

                neigh_ha_snapshot(haddr, n, dev);
                err = dev_hard_header(skb, dev, ntohs(skb->protocol), haddr,
                                      NULL, skb->len);
                if (err < 0) {
                        neigh_release(n);
                        return -EINVAL;
                }
                teql_neigh_release(xchg(&q->ncache, n));
                return 0;
        }
        neigh_release(n);
        return (skb_res == NULL) ? -EAGAIN : 1;
}
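Both __teql_resolve() versions keep a one-slot neighbour cache (q->ncache): a freshly referenced entry is installed with xchg(), and teql_neigh_release() drops the reference of whatever entry it displaced. Below is a sketch of that swap-and-release idiom on its own; cache_neigh() is a hypothetical helper, not taken from sch_teql.c.

/*
 * Hypothetical helper illustrating the one-slot cache idiom above:
 * xchg() atomically installs the new entry and hands back the old one,
 * whose reference is then dropped.
 */
#include <linux/atomic.h>
#include <net/neighbour.h>

static void cache_neigh(struct neighbour **slot, struct neighbour *n)
{
        struct neighbour *old;

        neigh_hold(n);                  /* reference now owned by the slot */
        old = xchg(slot, n);
        if (old)
                neigh_release(old);     /* release the displaced entry */
}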