void net_bh(void *tmp)
{
	struct sk_buff *skb;
	struct packet_type *pt_prev, *ptype;
	unsigned short type;

	/* Atomically check and mark our BUSY state. */
	if (set_bit(1, (void *) &in_bh))
		return;

	/* Flush any pending transmits before we start processing input. */
	dev_transmit();

	cli();

	/* While the backlog queue is not empty. */
	while ((skb = skb_dequeue(&backlog)) != NULL) {
		/* We have a packet, so the queue has shrunk. */
		backlog_size--;
		sti();

		/* Skip the link-level header and fetch the protocol ID. */
		skb->h.raw = skb->data + skb->dev->hard_header_len;
		skb->len -= skb->dev->hard_header_len;
		type = skb->dev->type_trans(skb, skb->dev);

		/*
		 * Walk the list of known protocols. Note that this early
		 * version hands the same skb to every matching handler
		 * (no clone) and never frees an unmatched packet; later
		 * versions fix both points.
		 */
		pt_prev = NULL;
		for (ptype = ptype_base; ptype != NULL; ptype = ptype->next) {
			if (ptype->type == type &&
			    (!ptype->dev || ptype->dev == skb->dev)) {
				if (pt_prev)
					pt_prev->func(skb, skb->dev, pt_prev);
				pt_prev = ptype;
			}
		}

		/* Deliver to the last match, if any. */
		if (pt_prev)
			pt_prev->func(skb, skb->dev, pt_prev);

		dev_transmit();
		cli();
	}

	/* The queue is empty; clear BUSY and do one last output flush. */
	in_bh = 0;
	sti();
	dev_transmit();
}
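For context, this is roughly how a protocol plugs into the dispatch loop above. The struct packet_type fields and dev_add_pack() match kernels of this era, but the IP wiring shown is a hedged sketch, not the verbatim init code:

/*
 * Sketch of registering a handler that net_bh() above delivers to.
 * The struct packet_type layout and dev_add_pack() follow same-era
 * kernel sources; the exact IP init sequence is an assumption.
 */
static struct packet_type ip_packet_type = {
	0,		/* set to htons(ETH_P_IP) before registration */
	NULL,		/* NULL device: match frames from any interface */
	ip_rcv,		/* func: invoked from net_bh() per matching skb */
	NULL,
	NULL
};

void ip_proto_init(void)
{
	ip_packet_type.type = htons(ETH_P_IP);
	dev_add_pack(&ip_packet_type);	/* link into ptype_base */
}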
void ether_dispatcher(void *arg)
{
  struct ether_msg *msg;
  struct pbuf *p;
  struct netif *netif;
  struct eth_hdr *ethhdr;

  while (1)
  {
    msg = dequeue(ether_queue, INFINITE);
    if (!msg) panic("error retrieving message from ethernet packet queue\n");

    p = msg->p;
    netif = msg->netif;
    kfree(msg);

    if (p != NULL)
    {
      ethhdr = p->payload;

      //if (!eth_addr_isbroadcast(&ethhdr->dest)) kprintf("ether: recv src=%la dst=%la type=%04X len=%d\n", &ethhdr->src, &ethhdr->dest, htons(ethhdr->type), p->len);

      switch (htons(ethhdr->type))
      {
        case ETHTYPE_IP:
          // Update the ARP cache from the IP header, then strip the
          // Ethernet header and hand the packet to the IP layer.
          arp_ip_input(netif, p);
          pbuf_header(p, -ETHER_HLEN);
          if (netif->input(p, netif) < 0) pbuf_free(p);
          break;

        case ETHTYPE_ARP:
          // Let the ARP module consume the packet; if it produces a
          // reply, transmit it on the same interface.
          p = arp_arp_input(netif, &netif->hwaddr, p);
          if (p != NULL)
          {
            if (dev_transmit((dev_t) netif->state, p) < 0) pbuf_free(p);
          }
          break;

        default:
          pbuf_free(p);
          break;
      }
    }

    //yield();
  }
}
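The dispatcher above only consumes ether_msg entries; the producing side is the driver receive path, which ether_output() further down also reuses for loopback via ether_input(). A minimal sketch of that producer, mirroring the dequeue()/kfree() calls above with matching kmalloc()/enqueue() calls; the real body of ether_input() may differ, so treat the error handling here as an assumption:

// Sketch of the producer feeding ether_queue, assuming an enqueue()
// counterpart to the dequeue() used by ether_dispatcher().
err_t ether_input(struct netif *netif, struct pbuf *p)
{
  struct ether_msg *msg;

  msg = (struct ether_msg *) kmalloc(sizeof(struct ether_msg));
  if (!msg) return -ENOMEM;

  msg->p = p;
  msg->netif = netif;

  // Hand the packet to ether_dispatcher(); it frees the message.
  if (enqueue(ether_queue, msg, 0) < 0)
  {
    kfree(msg);
    return -ENOMEM;
  }

  return 0;
}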
void net_bh(void *tmp)
{
	struct sk_buff *skb;
	struct packet_type *ptype;
	struct packet_type *pt_prev;
	unsigned short type;

	/*
	 *	Atomically check and mark our BUSY state.
	 */

	if (set_bit(1, (void *) &in_bh))
		return;

	/*
	 *	Can we send anything now? We want to clear the
	 *	decks for any more sends that get done as we
	 *	process the input.
	 */

	dev_transmit();

	/*
	 *	Any data left to process. This may occur because a
	 *	mark_bh() is done after we empty the queue including
	 *	that from the device which does a mark_bh() just after
	 */

	cli();

	/*
	 *	While the queue is not empty
	 */

	while ((skb = skb_dequeue(&backlog)) != NULL) {
		/*
		 *	We have a packet. Therefore the queue has shrunk
		 */
		backlog_size--;
		sti();

		/*
		 *	Bump the pointer to the next structure.
		 *	This assumes that the basic 'skb' pointer points to
		 *	the MAC header, if any (as indicated by its "length"
		 *	field). Take care now!
		 */

		skb->h.raw = skb->data + skb->dev->hard_header_len;
		skb->len -= skb->dev->hard_header_len;

		/*
		 *	Fetch the packet protocol ID. This is also quite ugly, as
		 *	it depends on the protocol driver (the interface itself) to
		 *	know what the type is, or where to get it from. The Ethernet
		 *	interfaces fetch the ID from the two bytes in the Ethernet MAC
		 *	header (the h_proto field in struct ethhdr), but other drivers
		 *	may either use the Ethernet IDs or extra ones that do not
		 *	clash (e.g. ETH_P_AX25). We could set this before we queue the
		 *	frame. In fact I may change this when I have time.
		 */

		type = skb->dev->type_trans(skb, skb->dev);

		/*
		 *	We got a packet ID. Now loop over the "known protocols"
		 *	table (which is actually a linked list, but this will
		 *	change soon if I get my way- FvK), and forward the packet
		 *	to anyone who wants it.
		 *
		 *	[FvK didn't get his way but he is right: this ought to be
		 *	hashed so we typically get a single hit. The speed cost
		 *	here is minimal but no doubt adds up at the 4,000+ pkts/second
		 *	rate we can hit flat out]
		 */

		pt_prev = NULL;
		for (ptype = ptype_base; ptype != NULL; ptype = ptype->next) {
			if ((ptype->type == type || ptype->type == htons(ETH_P_ALL)) &&
			    (!ptype->dev || ptype->dev == skb->dev)) {
				/*
				 *	We already have a match queued. Deliver
				 *	to it and then remember the new match
				 */
				if (pt_prev) {
					struct sk_buff *skb2;

					skb2 = skb_clone(skb, GFP_ATOMIC);

					/*
					 *	Kick the protocol handler. This should be fast
					 *	and efficient code.
					 */

					if (skb2)
						pt_prev->func(skb2, skb->dev, pt_prev);
				}

				/* Remember the current last to do */
				pt_prev = ptype;
			}
		} /* End of protocol list loop */

		/*
		 *	Is there a last item to send to ?
		 */

		if (pt_prev)
			pt_prev->func(skb, skb->dev, pt_prev);

		/*
		 *	Has an unknown packet been received ?
		 */

		else
			kfree_skb(skb, FREE_WRITE);

		/*
		 *	Again, see if we can transmit anything now.
		 *	[Ought to take this out judging by tests it slows
		 *	us down not speeds us up]
		 */

		dev_transmit();
		cli();
	} /* End of queue loop */

	/*
	 *	We have emptied the queue
	 */

	in_bh = 0;
	sti();

	/*
	 *	One last output flush.
	 */

	dev_transmit();
}
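The ETH_P_ALL case added to the matching test in this version is what makes packet taps possible: a handler registered with that type sees (a clone of) every frame regardless of its real protocol. A sketch of such a tap; only the registration mechanism is taken from the code above, and the handler body is purely illustrative:

/*
 * Hypothetical ETH_P_ALL tap. tap_rcv() and tap_init() are invented
 * names for illustration; the packet_type wiring matches the loop
 * above.
 */
static int tap_rcv(struct sk_buff *skb, struct device *dev,
		   struct packet_type *pt)
{
	printk("tap: %s len=%lu\n", dev->name, skb->len);
	kfree_skb(skb, FREE_READ);	/* the tap owns its clone */
	return 0;
}

static struct packet_type tap_packet_type = {
	0,		/* set to htons(ETH_P_ALL) before registration */
	NULL,		/* match every device */
	tap_rcv,
	NULL,
	NULL
};

void tap_init(void)
{
	tap_packet_type.type = htons(ETH_P_ALL);
	dev_add_pack(&tap_packet_type);
}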
void net_bh(void)
{
	struct packet_type *ptype;
	struct packet_type *pt_prev;
	unsigned short type;

	/*
	 *	Can we send anything now? We want to clear the
	 *	decks for any more sends that get done as we
	 *	process the input. This also minimises the
	 *	latency on a transmit interrupt bh.
	 */

	dev_transmit();

	/*
	 *	Any data left to process. This may occur because a
	 *	mark_bh() is done after we empty the queue including
	 *	that from the device which does a mark_bh() just after
	 */

	/*
	 *	While the queue is not empty..
	 *
	 *	Note that the queue never shrinks due to
	 *	an interrupt, so we can do this test without
	 *	disabling interrupts.
	 */

	while (!skb_queue_empty(&backlog)) {
		struct sk_buff *skb = backlog.next;

		/*
		 *	We have a packet. Therefore the queue has shrunk
		 */
		cli();
		__skb_unlink(skb, &backlog);
		backlog_size--;
		sti();

#ifdef CONFIG_BRIDGE
		/*
		 *	If we are bridging then pass the frame up to the
		 *	bridging code. If it is bridged then move on
		 */
		if (br_stats.flags & BR_UP) {
			/*
			 *	We pass the bridge a complete frame. This means
			 *	recovering the MAC header first.
			 */
			int offset = skb->data - skb->mac.raw;
			cli();
			skb_push(skb, offset);	/* Put header back on for bridge */
			if (br_receive_frame(skb)) {
				sti();
				continue;
			}
			/*
			 *	Pull the MAC header off for the copy going to
			 *	the upper layers.
			 */
			skb_pull(skb, offset);
			sti();
		}
#endif

		/*
		 *	Bump the pointer to the next structure.
		 *
		 *	On entry to the protocol layer, skb->data and
		 *	skb->h.raw point to the MAC and encapsulated data
		 */

		skb->h.raw = skb->data;

		/*
		 *	Fetch the packet protocol ID.
		 */

		type = skb->protocol;

		/*
		 *	We got a packet ID. Now loop over the "known protocols"
		 *	list. There are two lists. The ptype_all list of taps
		 *	(normally empty) and the main protocol list which is
		 *	hashed perfectly for normal protocols.
		 */

		pt_prev = NULL;
		for (ptype = ptype_all; ptype != NULL; ptype = ptype->next) {
			if (!ptype->dev || ptype->dev == skb->dev) {
				if (pt_prev) {
					struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
					if (skb2)
						pt_prev->func(skb2, skb->dev, pt_prev);
				}
				pt_prev = ptype;
			}
		}

		for (ptype = ptype_base[ntohs(type) & 15]; ptype != NULL; ptype = ptype->next) {
			if (ptype->type == type &&
			    (!ptype->dev || ptype->dev == skb->dev)) {
				/*
				 *	We already have a match queued. Deliver
				 *	to it and then remember the new match
				 */
				if (pt_prev) {
					struct sk_buff *skb2;

					skb2 = skb_clone(skb, GFP_ATOMIC);

					/*
					 *	Kick the protocol handler. This should be fast
					 *	and efficient code.
					 */

					if (skb2)
						pt_prev->func(skb2, skb->dev, pt_prev);
				}

				/* Remember the current last to do */
				pt_prev = ptype;
			}
		} /* End of protocol list loop */

		/*
		 *	Is there a last item to send to ?
		 */

		if (pt_prev)
			pt_prev->func(skb, skb->dev, pt_prev);

		/*
		 *	Has an unknown packet been received ?
		 */

		else
			kfree_skb(skb, FREE_WRITE);

		/*
		 *	Again, see if we can transmit anything now.
		 *	[Ought to take this out judging by tests it slows
		 *	us down not speeds us up]
		 */

#ifdef XMIT_EVERY
		dev_transmit();
#endif
	} /* End of queue loop */

	/*
	 *	We have emptied the queue
	 */

	/*
	 *	One last output flush.
	 */

#ifdef XMIT_AFTER
	dev_transmit();
#endif
}
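This version replaces the single linear ptype_base list with 16 hash buckets plus the separate ptype_all tap list, so an ordinary protocol lookup typically probes one short chain. A sketch of the registration side that produces that structure, modelled on the same-era dev_add_pack(); treat the details as an assumption:

/*
 * Sketch of the registration matching the hashed lookup above:
 * ETH_P_ALL taps go on ptype_all, everything else into one of the
 * 16 ptype_base buckets selected by the same (ntohs(type) & 15)
 * hash the receive loop uses. (The real kernel also maintains a
 * tap counter alongside ptype_all, omitted here.)
 */
void dev_add_pack(struct packet_type *pt)
{
	int hash;

	if (pt->type == htons(ETH_P_ALL)) {
		pt->next = ptype_all;
		ptype_all = pt;
	} else {
		hash = ntohs(pt->type) & 15;
		pt->next = ptype_base[hash];
		ptype_base[hash] = pt;
	}
}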
err_t ether_output(struct netif *netif, struct pbuf *p, struct ip_addr *ipaddr)
{
  struct pbuf *q;
  struct eth_hdr *ethhdr;
  struct eth_addr *dest, mcastaddr;
  struct ip_addr *queryaddr;
  err_t err;
  int i;
  int loopback = 0;

  //kprintf("ether: xmit %d bytes, %d bufs\n", p->tot_len, pbuf_clen(p));

  if ((netif->flags & NETIF_UP) == 0) return -ENETDOWN;

  // Make room for the Ethernet header in front of the payload.
  if (pbuf_header(p, ETHER_HLEN))
  {
    kprintf(KERN_ERR "ether_output: not enough room for Ethernet header in pbuf\n");
    stats.link.err++;
    return -EBUF;
  }

  // Construct Ethernet header. Start with deciding which
  // MAC address to use as a destination address. Broadcasts and
  // multicasts are special; all other addresses are looked up in the
  // ARP table.
  queryaddr = ipaddr;
  if (ip_addr_isany(ipaddr) || ip_addr_isbroadcast(ipaddr, &netif->netmask))
  {
    dest = (struct eth_addr *) &ethbroadcast;
  }
  else if (ip_addr_ismulticast(ipaddr))
  {
    // Hash IP multicast address to MAC address.
    mcastaddr.addr[0] = 0x01;
    mcastaddr.addr[1] = 0x0;
    mcastaddr.addr[2] = 0x5e;
    mcastaddr.addr[3] = ip4_addr2(ipaddr) & 0x7f;
    mcastaddr.addr[4] = ip4_addr3(ipaddr);
    mcastaddr.addr[5] = ip4_addr4(ipaddr);
    dest = &mcastaddr;
  }
  else if (ip_addr_cmp(ipaddr, &netif->ipaddr))
  {
    // Packet for our own address: loop it back locally.
    dest = &netif->hwaddr;
    loopback = 1;
  }
  else
  {
    if (ip_addr_maskcmp(ipaddr, &netif->ipaddr, &netif->netmask))
    {
      // Use destination IP address if the destination is on the same subnet as we are.
      queryaddr = ipaddr;
    }
    else
    {
      // Otherwise we use the default router as the address to send the Ethernet frame to.
      queryaddr = &netif->gw;
    }
    dest = arp_lookup(queryaddr);
  }

  // If arp_lookup() didn't find an address, we send out an ARP query for the IP address.
  if (dest == NULL)
  {
    q = arp_query(netif, &netif->hwaddr, queryaddr);
    if (q != NULL)
    {
      err = dev_transmit((dev_t) netif->state, q);
      if (err < 0)
      {
        kprintf(KERN_ERR "ether: error %d sending arp packet\n", err);
        pbuf_free(q);
        stats.link.drop++;
        return err;
      }
    }

    // Queue packet for transmission until the ARP reply returns.
    err = arp_queue(netif, p, queryaddr);
    if (err < 0)
    {
      kprintf(KERN_ERR "ether: error %d queueing packet\n", err);
      stats.link.drop++;
      stats.link.memerr++;
      return err;
    }

    return 0;
  }

  // Fill in the Ethernet header.
  ethhdr = p->payload;
  for (i = 0; i < 6; i++)
  {
    ethhdr->dest.addr[i] = dest->addr[i];
    ethhdr->src.addr[i] = netif->hwaddr.addr[i];
  }
  ethhdr->type = htons(ETHTYPE_IP);

  if (loopback)
  {
    // Deliver a copy of the frame to our own input path.
    q = pbuf_dup(PBUF_RAW, p);
    if (!q) return -ENOMEM;

    err = ether_input(netif, q);
    if (err < 0)
    {
      pbuf_free(q);
      return err;
    }
  }
  else
  {
    err = dev_transmit((dev_t) netif->state, p);
    if (err < 0)
    {
      kprintf(KERN_ERR "ether: error %d sending packet\n", err);
      return err;
    }
  }

  return 0;
}
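The multicast branch above is the standard RFC 1112 IP-to-MAC mapping: the low 23 bits of the group address are copied under the fixed 01:00:5e prefix, so the mapping loses 5 bits and 32 distinct IP groups share each MAC address. A small standalone program (not part of the driver) demonstrating the arithmetic, assuming the ip4_addrN() accessors above return the Nth dotted-quad byte:

#include <stdint.h>
#include <stdio.h>

// RFC 1112 mapping as performed by ether_output(): keep only the low
// 23 bits of the group address under the fixed 01:00:5e prefix.
static void ip_mcast_to_mac(const uint8_t ip[4], uint8_t mac[6])
{
  mac[0] = 0x01;
  mac[1] = 0x00;
  mac[2] = 0x5e;
  mac[3] = ip[1] & 0x7f;  // second byte loses its top bit
  mac[4] = ip[2];
  mac[5] = ip[3];         // first byte (224..239) is dropped entirely
}

int main(void)
{
  uint8_t ip[4] = { 224, 0, 1, 129 };
  uint8_t mac[6];

  ip_mcast_to_mac(ip, mac);
  // Prints 01:00:5e:00:01:81 for 224.0.1.129.
  printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
         mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
  return 0;
}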