/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held
 * (for writing) by the caller; nothing here takes the lock itself.
 *
 * Returns NULL only when the free counter says the table is full;
 * otherwise a scan is guaranteed to find an entry with refcnt == 0.
 */
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	if (!atomic_read(&d->nfree))
		return NULL;

	/* there's definitely a free entry */
	/* First pass: scan from the rover to the end of the table. */
	for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
		if (atomic_read(&e->refcnt) == 0)
			goto found;

	/*
	 * Wrap around: restart from index 1 (entry 0 is not handed out).
	 * nfree > 0 guarantees this loop terminates on a free entry.
	 */
	for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ;
found:
	/* Resume the next allocation scan just past this entry. */
	d->rover = e + 1;
	atomic_dec(&d->nfree);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state != L2T_STATE_UNUSED) {
		int hash = arp_hash(e->addr, e->ifindex, d);

		/* Unlink e from its hash chain via the pointer-to-pointer walk. */
		for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
			if (*p == e) {
				*p = e->next;
				break;
			}
		e->state = L2T_STATE_UNUSED;
	}
	return e;
}
/*
 * Look up (or allocate) the L2T entry for the destination described by
 * @neigh on egress device @dev.
 *
 * On a hash hit the entry's refcount is bumped; if this lookup is the
 * first reference (refcnt became 1) the inactive entry is re-armed from
 * the neighbour state via reuse_entry().  On a miss a fresh entry is
 * taken from the free pool, inserted at the head of its hash chain and
 * initialized in L2T_STATE_RESOLVING.
 *
 * Returns the entry, or NULL if the table is exhausted.
 * Runs entirely under d->lock held for writing (BHs disabled).
 */
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
			     struct net_device *dev)
{
	struct l2t_entry *e;
	struct l2t_data *d = L2DATA(cdev);
	u32 addr = *(u32 *) neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = arp_hash(addr, ifidx, d);
	struct port_info *p = netdev_priv(dev);
	int smt_idx = p->port_id;

	write_lock_bh(&d->lock);
	/* Match on address, interface AND source MAC table index. */
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (e->addr == addr && e->ifindex == ifidx &&
		    e->smt_idx == smt_idx) {
			l2t_hold(d, e);
			/*
			 * refcnt == 1 means we just revived an inactive
			 * entry; resync it with the current neighbour.
			 */
			if (atomic_read(&e->refcnt) == 1)
				reuse_entry(e, neigh);
			goto done;
		}

	/* Need to allocate a new entry */
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);	/* avoid race with t3_l2t_free */
		/* Insert at the head of the hash chain. */
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;
		e->state = L2T_STATE_RESOLVING;
		e->addr = addr;
		e->ifindex = ifidx;
		e->smt_idx = smt_idx;
		atomic_set(&e->refcnt, 1);
		neigh_replace(e, neigh);
		/* Record the VLAN tag when the egress device is a VLAN dev. */
		if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
			e->vlan = vlan_dev_vlan_id(neigh->dev);
		else
			e->vlan = VLAN_NONE;
		spin_unlock(&e->lock);
	}
done:
	write_unlock_bh(&d->lock);
	return e;
}
/*
 * Called when the host's ARP layer makes a change to some entry that is
 * loaded into the HW L2 table.
 *
 * Finds the matching L2T entry and, while holding its spinlock, syncs it
 * with the new neighbour state: flushes the pending-ARP queue on a failed
 * resolution, pushes pending packets once the neighbour resolves, or
 * refreshes VALID/STALE state (re-programming the HW entry if the
 * destination MAC changed).
 *
 * Locking: note the deliberate asymmetry below.  read_lock_bh() disables
 * BHs and takes d->lock; once e->lock is held, d->lock is dropped with
 * read_unlock() (NOT the _bh variant), so BHs stay disabled until the
 * final spin_unlock_bh(&e->lock) re-enables them.  This is a lock
 * handoff, not a mismatched-unlock bug.
 */
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
{
	struct l2t_entry *e;
	struct sk_buff *arpq = NULL;
	struct l2t_data *d = L2DATA(dev);
	u32 addr = *(u32 *) neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = arp_hash(addr, ifidx, d);

	read_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (e->addr == addr && e->ifindex == ifidx) {
			spin_lock(&e->lock);
			goto found;
		}
	read_unlock_bh(&d->lock);
	return;

found:
	/* Drop d->lock but keep BHs disabled (see header comment). */
	read_unlock(&d->lock);
	if (atomic_read(&e->refcnt)) {
		if (neigh != e->neigh)
			neigh_replace(e, neigh);

		if (e->state == L2T_STATE_RESOLVING) {
			if (neigh->nud_state & NUD_FAILED) {
				/* Resolution failed: detach the queued skbs
				 * so they can be failed outside e->lock. */
				arpq = e->arpq_head;
				e->arpq_head = e->arpq_tail = NULL;
			} else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
				setup_l2e_send_pending(dev, NULL, e);
		} else {
			e->state = neigh->nud_state & NUD_CONNECTED ?
			    L2T_STATE_VALID : L2T_STATE_STALE;
			/* Re-program the HW entry if the MAC changed. */
			if (memcmp(e->dmac, neigh->ha, 6))
				setup_l2e_send_pending(dev, NULL, e);
		}
	}
	spin_unlock_bh(&e->lock);

	if (arpq)
		handle_failed_resolution(dev, arpq);
}
/*
 * Look up the MAC address for @daddr in the ARP cache.
 *
 * Hit with a fresh entry: copies the MAC into @mac and returns the
 * table entry.  Otherwise (empty slot, a request already outstanding,
 * or the entry older than ARP_MAX_LIFE) the skb is queued for later
 * transmission, the slot is marked ARP_STATUS_REQUEST, an ARP request
 * is (re)sent, and NULL is returned.
 *
 * The original code spelled the slow path out three times with
 * identical bodies; it is consolidated here.  Re-writing
 * ARP_STATUS_REQUEST when the slot is already in that state is
 * idempotent, so behavior is unchanged.
 *
 * NOTE(review): the slot is selected by arp_hash(daddr) alone and the
 * stored IP is never compared against daddr, so two addresses hashing
 * to the same slot can return each other's MAC -- confirm whether an
 * entry->ip check (as hinted by commented-out code in arp_rcv) is
 * needed here.
 */
struct arptab *arp_lookup(struct sk_buff *skb, __be32 daddr, unsigned char *mac)
{
	struct arptab *entry;

	pthread_spin_lock(&arp_lock);
	entry = &arp_table[arp_hash(daddr)];

	/* Fast path: resolved entry that has not outlived ARP_MAX_LIFE. */
	if (entry->status == ARP_STATUS_OK &&
	    get_second() <= entry->time + ARP_MAX_LIFE) {
		/* Copy the MAC out while still holding the lock. */
		memcpy(mac, entry->mac, ETH_ALEN);
		pthread_spin_unlock(&arp_lock);
		return entry;
	}

	/*
	 * Slow path: empty slot, resolution in progress, or stale entry.
	 * Queue the skb, mark the slot pending, then issue the ARP
	 * request outside the lock.
	 */
	arp_queue_try_insert(skb);
	entry->status = ARP_STATUS_REQUEST;
	pthread_spin_unlock(&arp_lock);
	arp_request(daddr);
	return NULL;
}
/*
 * Process a received ARP packet.
 *
 * Requests addressed to this NIC's IP are answered by rewriting the
 * packet in place into a reply and sending it back out.  Replies
 * addressed to this NIC update the ARP table entry hashed from the
 * sender's IP and wake any thread waiting on arp_queue_check.
 *
 * skb ownership: the skb is handed to dev_send() in the request path;
 * on every error path and after a processed reply it is freed here.
 * NOTE(review): the reply path falls through 'reused: return;' without
 * freeing the skb -- confirm that is intentional (it appears the reply
 * path reaches 'reused' only via the request case's dev_send).
 * Actually the REPLY case breaks out of the switch and also reaches
 * 'reused: return;' without skb_free -- verify against skb accounting.
 */
void arp_rcv(struct sk_buff *skb)
{
	struct arppkt *ap;
	struct ethhdr *eh;
	unsigned int tip, sip;
	struct arptab *h;
	int hl;
	struct net_device *nic;

	/* Strip the Ethernet header; skb->data already points past it. */
	hl = sizeof(struct ethhdr);
	skb->len -= hl;
	ap = (struct arppkt *) skb->data;
	skb->nh.arph = (struct arphdr *) skb->data;
	/* Back up to the Ethernet header to reuse its MACs for the reply. */
	eh = (struct ethhdr *) (skb->data - hl);
	nic = skb->nic;
	/* skb->data += sizeof(struct arphdr); */

	printf("--- ARP: packet received\n");

	/* Accept only Ethernet/IPv4 ARP with the expected address sizes. */
	if (ap->ar_hrd != htons(ARPHRD_ETHER) ||
	    ap->ar_pro != htons(ETHERTYPE_IP) ||
	    ap->ar_hln != ETH_ALEN || ap->ar_pln != 4)
		goto bad;

	switch (ntohs(ap->ar_op)) {
	case ARPOP_REQUEST:
		tip = *(unsigned int *)ap->__ar_tip;
		sip = *(unsigned int *)ap->__ar_sip;
		/* Not for our IP: drop silently. */
		if (tip != skb->nic->ip)
			goto drop;

		/* Turn the request into a reply in place: swap the IPs,
		 * fill in our MAC as sender, echo the requester as target. */
		ap->ar_op = htons(ARPOP_REPLY);
		*(unsigned int *)ap->__ar_sip = skb->nic->ip;
		*(unsigned int *)ap->__ar_tip = sip;
		memcpy(ap->__ar_sha, skb->nic->dev_addr, ETH_ALEN);
		memcpy(ap->__ar_tha, eh->h_source, ETH_ALEN);
		/* Swap the Ethernet MACs as well. */
		memcpy(eh->h_dest, eh->h_source, ETH_ALEN);
		memcpy(eh->h_source, skb->nic->dev_addr, ETH_ALEN);
		/* Restore the Ethernet header length stripped above
		 * (14 == sizeof(struct ethhdr) -- presumably; should use hl). */
		skb->len +=14;
		skb->ip_summed = 0;
		skb->protocol = ETHERTYPE_ARP;
		dev_send(skb);
		goto reused;
		break;	/* unreachable after goto */

	case ARPOP_REPLY:
		sip = *(unsigned int *)ap->__ar_sip;
		tip = *(unsigned int *)ap->__ar_tip;
		pthread_spin_lock(&arp_lock);
		h = &arp_table[arp_hash(sip)];
		/* NOTE(review): the slot's own IP is not checked against sip
		 * (see commented-out condition), so hash collisions can
		 * overwrite an unrelated entry -- confirm. */
		if (/* h->ip == sip && */ nic->ip == tip) {
			/* Low bit of the first MAC byte is the group/multicast
			 * flag; a unicast sender must never have it set.
			 * (Original comment just said "why?".) */
			if (ap->__ar_sha[0] & 1)
				goto unlock_bad;
			memcpy(h->mac, ap->__ar_sha, ETH_ALEN);
			/* if (h->hold && (tv.tv_sec - h->time > ARP_MAX_HOLD)) */
			/* { */
			/*	skb_free(h->hold); */
			/*	h->hold = NULL; */
			/* } */
			h->time = get_second();
			if (h->status == ARP_STATUS_REQUEST) {
				h->status = ARP_STATUS_OK;
				pthread_spin_unlock(&arp_lock);
				/* Wake the thread(s) waiting for resolution. */
				pthread_cond_signal(&arp_queue_check);
			} else {
				/* received broadcast reply who was not requested */
				h->status = ARP_STATUS_OK;
				pthread_spin_unlock(&arp_lock);
			}
			/* if ((skb = h->hold)) */
			/* { */
			/*	h->hold = NULL; */
			/*	memcpy(eh->h_dest, h->mac, ETH_ALEN); */
			/*	pthread_spin_unlock(&arp_lock); */
			/*	/\* fragmentation? *\/ */
			/*	dev_send(skb); */
			/* } */
			/* else */
			/*	pthread_spin_unlock(&arp_lock); */
		} else {
			goto unlock_bad;
		}
		break;

	default:
		goto bad;
	}

reused:
	return;

bad:
	printf("arp: packet invalid\n");
drop:
	skb_free(skb);
	return;

unlock_bad:
	pthread_spin_unlock(&arp_lock);
	skb_free(skb);
}