/* Resolve the link-layer address for skb via the neighbour cache before the
 * packet is handed to a slave device.
 *
 * Returns 0 on success (hard header built, resolved neighbour stored in
 * q->ncache), a negative errno on hard failure, -EAGAIN when resolution is
 * still pending and no skb_res was supplied, or 1 when skb_res was handed to
 * the neighbour layer to wait for resolution.
 */
static int
__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
{
	struct teql_sched_data *q = (void*)dev->qdisc->data;
	struct neighbour *mn = skb->dst->neighbour;
	struct neighbour *n = q->ncache;

	if (mn->tbl == NULL)
		return -EINVAL;
	/* Reuse the cached neighbour when it matches the destination key;
	 * otherwise look one up (taking a reference either way).
	 */
	if (n && n->tbl == mn->tbl &&
	    memcmp(n->primary_key, mn->primary_key, mn->tbl->key_len) == 0) {
		atomic_inc(&n->refcnt);
	} else {
		n = __neigh_lookup_errno(mn->tbl, mn->primary_key, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}
	if (neigh_event_send(n, skb_res) == 0) {
		int err;

		/* n->ha may change under us; hold n->lock while the hardware
		 * address is copied into the link-layer header.
		 */
		read_lock(&n->lock);
		err = dev->hard_header(skb, dev, ntohs(skb->protocol), n->ha, NULL, skb->len);
		read_unlock(&n->lock);
		if (err < 0) {
			neigh_release(n);
			return -EINVAL;
		}
		/* Cache the resolved neighbour; release the previous entry. */
		teql_neigh_release(xchg(&q->ncache, n));
		return 0;
	}
	neigh_release(n);
	return (skb_res == NULL) ? -EAGAIN : 1;
}
/* Detach this qdisc from its master's circular slave list and tear down the
 * per-qdisc state (queued packets, cached neighbour). Always drops the module
 * reference taken at init time.
 */
static void
teql_destroy(struct Qdisc* sch)
{
	struct Qdisc *q, *prev;
	struct teql_sched_data *dat = (struct teql_sched_data *)sch->data;
	struct teql_master *master = dat->m;

	if ((prev = master->slaves) != NULL) {
		do {
			q = NEXT_SLAVE(prev);
			if (q == sch) {
				/* Unlink sch from the circular list. */
				NEXT_SLAVE(prev) = NEXT_SLAVE(q);
				if (q == master->slaves) {
					master->slaves = NEXT_SLAVE(q);
					/* sch was the only remaining slave:
					 * the list is now empty, so quiesce
					 * the master device's qdisc.
					 */
					if (q == master->slaves) {
						master->slaves = NULL;
						spin_lock_bh(&master->dev.queue_lock);
						qdisc_reset(master->dev.qdisc);
						spin_unlock_bh(&master->dev.queue_lock);
					}
				}
				skb_queue_purge(&dat->q);
				teql_neigh_release(xchg(&dat->ncache, NULL));
				break;
			}
		} while ((prev = q) != master->slaves);
	}

	MOD_DEC_USE_COUNT;
}
/* Flush all transient state held by this teql qdisc: every queued packet is
 * dropped, the queue-length accounting is zeroed, and the cached neighbour
 * reference is released.
 */
static void
teql_reset(struct Qdisc* sch)
{
	struct teql_sched_data *priv = (struct teql_sched_data *)sch->data;

	/* The cached neighbour is independent of the packet queue; swap it
	 * out atomically and drop the reference.
	 */
	teql_neigh_release(xchg(&priv->ncache, NULL));

	/* Free all queued skbs and reset the length counter. */
	skb_queue_purge(&priv->q);
	sch->q.qlen = 0;
}
/* Resolve the link-layer address for skb via the neighbour cache before the
 * packet is handed to a slave device (newer API variant: the slave's tx queue
 * and the destination neighbour are passed in explicitly).
 *
 * Returns 0 on success (hard header built, resolved neighbour stored in
 * q->ncache), a negative errno on hard failure, -EAGAIN when resolution is
 * still pending and no skb_res was supplied, or 1 when skb_res was handed to
 * the neighbour layer to wait for resolution.
 */
static int
__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
	       struct net_device *dev, struct netdev_queue *txq,
	       struct neighbour *mn)
{
	struct teql_sched_data *q = qdisc_priv(txq->qdisc);
	struct neighbour *n = q->ncache;

	if (mn->tbl == NULL)
		return -EINVAL;
	/* Reuse the cached neighbour when it matches the destination key;
	 * otherwise look one up (taking a reference either way).
	 */
	if (n && n->tbl == mn->tbl &&
	    memcmp(n->primary_key, mn->primary_key, mn->tbl->key_len) == 0) {
		atomic_inc(&n->refcnt);
	} else {
		n = __neigh_lookup_errno(mn->tbl, mn->primary_key, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}
	if (neigh_event_send(n, skb_res) == 0) {
		int err;
		char haddr[MAX_ADDR_LEN];

		/* Take a consistent copy of the hardware address under the
		 * neighbour's own locking rather than holding n->lock across
		 * the header build.
		 */
		neigh_ha_snapshot(haddr, n, dev);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol), haddr, NULL, skb->len);
		if (err < 0) {
			neigh_release(n);
			return -EINVAL;
		}
		/* Cache the resolved neighbour; release the previous entry. */
		teql_neigh_release(xchg(&q->ncache, n));
		return 0;
	}
	neigh_release(n);
	return (skb_res == NULL) ? -EAGAIN : 1;
}
static void teql_destroy(struct Qdisc *sch) { struct Qdisc *q, *prev; struct teql_sched_data *dat = qdisc_priv(sch); struct teql_master *master = dat->m; prev = master->slaves; if (prev) { do { q = NEXT_SLAVE(prev); if (q == sch) { NEXT_SLAVE(prev) = NEXT_SLAVE(q); if (q == master->slaves) { master->slaves = NEXT_SLAVE(q); if (q == master->slaves) { struct netdev_queue *txq; spinlock_t *root_lock; txq = netdev_get_tx_queue(master->dev, 0); master->slaves = NULL; root_lock = qdisc_root_sleeping_lock(txq->qdisc); spin_lock_bh(root_lock); qdisc_reset(txq->qdisc); spin_unlock_bh(root_lock); } } skb_queue_purge(&dat->q); teql_neigh_release(xchg(&dat->ncache, NULL)); break; } } while ((prev = q) != master->slaves); } }