/* pk_prepend() - allocate a new, larger buffer and link it in front of
 * pkt as the new head of the packet chain. The new buffer starts empty
 * (nb_plen == 0) and inherits pkt's total chain length, its flags
 * (except the allocator-owned PKF_HEAPBUF and PKF_INTRUNSAFE bits) and
 * its net pointer.
 *
 * Returns the new chain head, or NULL if allocation failed - in which
 * case pkt has been freed and must not be used by the caller.
 */
PACKET
pk_prepend(PACKET pkt, int bigger)
{
   PACKET head;

   LOCK_NET_RESOURCE(FREEQ_RESID);
   head = pk_alloc(bigger);
   UNLOCK_NET_RESOURCE(FREEQ_RESID);

   if (head == NULL)
   {
      /* out of buffers - the contract is to consume pkt on failure */
      LOCK_NET_RESOURCE(FREEQ_RESID);
      pk_free(pkt);
      UNLOCK_NET_RESOURCE(FREEQ_RESID);
      return NULL;
   }

   head->nb_prot = head->nb_buff;   /* no MAC prepend */

   /* new head holds no data yet; total chain length is unchanged */
   head->nb_plen = 0;
   head->nb_tlen = pkt->nb_tlen;

   /* link the new buffer in front of the old head */
   head->pk_next = pkt;
   head->pk_prev = NULL;
   pkt->pk_prev = head;

   /* copy the old head's flags into the new buffer, but never
    * overwrite its allocator-owned PKF_HEAPBUF and PKF_INTRUNSAFE bits
    */
   head->flags |= (pkt->flags & ~(PKF_HEAPBUF | PKF_INTRUNSAFE));
   head->net = pkt->net;

   return head;
}
/* m_getnbuf() - allocate an mbuf of the given type and, when len is
 * non-zero, attach a packet buffer with room for len bytes of data plus
 * HDRSLEN bytes of header prepend space (m_data/m_base point past that
 * reserve). With len == 0 no packet buffer is attached and the caller
 * must fill in m_base/m_data/m_memsz itself. The new mbuf is queued on
 * mbufq. Returns the mbuf, or NULL if either allocation failed
 * (nothing is leaked on the failure paths).
 */
struct mbuf *
m_getnbuf(int type, int len)
{
   struct mbuf * m;
   PACKET pkt = NULL;

#ifdef NPDEBUG
   if (type < MT_RXDATA || type > MT_IFADDR)
   {
      dtrap();    /* is this OK? */
   }
#endif

   /* if caller has data (len != 0), we need to allocate
    * a packet buffer; else all we need is the mbuf
    */
   if (len != 0)
   {
      LOCK_NET_RESOURCE(FREEQ_RESID);
      pkt = pk_alloc(len + HDRSLEN);   /* extra room for header prepends */
      UNLOCK_NET_RESOURCE(FREEQ_RESID);
      if (!pkt)
         return NULL;
   }

   m = (struct mbuf *)getq(&mfreeq);
   if (!m)
   {
      /* no free mbufs - release the packet buffer before bailing */
      if (pkt)
      {
         LOCK_NET_RESOURCE(FREEQ_RESID);
         pk_free(pkt);
         UNLOCK_NET_RESOURCE(FREEQ_RESID);
      }
      return NULL;
   }

   m->m_type = type;
   if (len == 0)
   {
      m->pkt = NULL;
      m->m_base = NULL;    /* caller better fill these in! */
      m->m_memsz = 0;
   }
   else
   {
      m->pkt = pkt;
      /* set m_data to the part where tcp data should go */
      m->m_base = m->m_data = pkt->nb_prot = pkt->nb_buff + HDRSLEN;
      m->m_memsz = pkt->nb_blen - HDRSLEN;
   }
   m->m_len = 0;
   m->m_next = m->m_act = NULL;
   mbstat.allocs++;        /* maintain local statistics */
   putq(&mbufq, (qp)m);
   return m;
}
/* pk_gather() - copy a chain of packet buffers into a single new
 * contiguous buffer, reserving headerlen bytes of space in front of the
 * copied data (the new nb_prot is set past that reserve). The new
 * buffer inherits the old head's flags - except the allocator-owned
 * PKF_HEAPBUF and PKF_INTRUNSAFE bits - and its net pointer.
 * The original chain is handed to pk_free() and must not be touched by
 * the caller afterwards. Returns the new packet, or NULL (original
 * chain left intact) if allocation fails. Panics if the number of
 * bytes copied disagrees with the chain's recorded nb_tlen.
 */
PACKET
pk_gather(PACKET pkt, int headerlen)
{
   PACKET newpkt;
   PACKET tmppkt;
   int oldlen, newlen;
   char * cp;

   oldlen = pkt->nb_tlen;    /* expected total length of chain data */

   LOCK_NET_RESOURCE(FREEQ_RESID);
   newpkt = pk_alloc(oldlen + headerlen);
   UNLOCK_NET_RESOURCE(FREEQ_RESID);
   if (!newpkt)
      return NULL;

   newpkt->nb_prot = newpkt->nb_buff + headerlen;

   /* ensure that the newly allocated buffer's PKF_HEAPBUF and
    * PKF_INTRUNSAFE flags aren't overwritten
    */
   newpkt->flags |= (pkt->flags & (~(PKF_HEAPBUF | PKF_INTRUNSAFE)));
   newpkt->net = pkt->net;
   newpkt->pk_prev = newpkt->pk_next = NULL;

   tmppkt = pkt;   /* save chain head for the pk_free call below */

   /* walk the chain, appending each buffer's protocol data */
   newlen = 0;
   cp = newpkt->nb_prot;
   while (pkt)
   {
      MEMCPY(cp, pkt->nb_prot, pkt->nb_plen);
      newlen += pkt->nb_plen;
      cp += pkt->nb_plen;       /* bump pointer to data */
      pkt = pkt->pk_next;       /* next packet in chain */
   }

   LOCK_NET_RESOURCE(FREEQ_RESID);
   pk_free(tmppkt);   /* free the original chain head. NOTE(review): the
                       * original comment said "last packet in chain";
                       * presumably pk_free() releases the whole chain -
                       * confirm against pk_free()'s contract. */
   UNLOCK_NET_RESOURCE(FREEQ_RESID);

   if (newlen != oldlen)     /* make sure length was right */
      panic("pk_gather");

   newpkt->nb_plen = newlen;
   return newpkt;
}
/* dm9000a_isr() - receive-side interrupt handler for the DM9000A
 * ethernet controller. Masks chip interrupts, then drains every ready
 * frame from RX SRAM: a good frame is copied (two bytes per I/O read,
 * low byte first) into a freshly allocated packet and queued on rcvdq
 * for the demux task; a bad or unallocatable frame is read out of SRAM
 * and discarded, bumping ifInDiscards when allocation failed.
 *
 * NOTE(review): this block appears truncated - the function's closing
 * brace and any interrupt re-enable are not visible here.
 * NOTE(review): pk_alloc() is called without the
 * LOCK_NET_RESOURCE(FREEQ_RESID) bracket used elsewhere in this file -
 * presumably deliberate in interrupt context, but confirm.
 */
static void
dm9000a_isr(int iface)
{
   unsigned char rx_rdy, istatus;
   unsigned int tmp, rx_sts, i, rx_len;
   struct ethhdr * eth;
   PACKET pkt;
   DM9KA dm9ka = (DM9KA)nets[iface]->n_local;

   /* mask NIC interrupts IMR: PAR only */
   dm9000a_iow(IMR, PAR_set);

   istatus = dm9000a_ior(ISR);

   rx_rdy = dm9000a_rxReady(dm9ka);
   usleep(STD_DELAY);

   while (rx_rdy == DM9000_PKT_READY)
   {
      /* get RX Status & Length from RX SRAM */
      /* set MRCMD REG. F2H RX I/O port ready */
      IOWR(dm9ka->regbase, IO_addr, MRCMD);
      usleep(STD_DELAY);
      rx_sts = IORD(dm9ka->regbase, IO_data);
      usleep(STD_DELAY);
      rx_len = IORD(dm9ka->regbase, IO_data);

      /* Check this packet_status: GOOD or BAD? */
      if (!(rx_sts & 0xBF00) && (rx_len < MAX_PACKET_SIZE))
      {
         if ((pkt = pk_alloc(rx_len + ETHHDR_BIAS)) == NULL)
         {
            /* couldn't get a free buffer for rx */
            dm9ka->netp->n_mib->ifInDiscards++;
            /* treat packet as bad, dump it from RX SRAM */
            for (i = 0; i < rx_len; i += 2)
            {
               usleep(STD_DELAY);
               tmp = IORD(dm9ka->regbase, IO_data);
            }
         }
         else   /* packet allocation succeeded */
         {
            unsigned char* data_ptr = pkt->nb_buff + ETHHDR_BIAS;

            /* read 1 received packet from RX SRAM into RX packet
             * buffer - each 16-bit I/O read yields two frame bytes */
            for (i = 0; i < rx_len; i += 2)
            {
               usleep(STD_DELAY);
               tmp = IORD(dm9ka->regbase, IO_data);
               *data_ptr++ = tmp & 0xFF;
               *data_ptr++ = (tmp>>8) & 0xFF;
            }

            /* payload starts past the 14-byte MAC header */
            pkt->nb_prot = pkt->nb_buff + ETHHDR_SIZE;
            pkt->nb_plen = rx_len - 14;
            pkt->nb_tstamp = cticks;
            pkt->net = dm9ka->netp;

            /* set packet type for demux routine */
            eth = (struct ethhdr *)(pkt->nb_buff + ETHHDR_BIAS);
            pkt->type = eth->e_type;

            /* shove packet into iniche stack's recv queue */
            //printf("rx: 0x%x l %d s %x:%x:%x\n", eth->e_type, rx_len,
            //   eth->e_src[0], eth->e_src[1], eth->e_src[2]);
            putq(&rcvdq, pkt);
            SignalPktDemux();
         }
      }
      else
      {
         /* this packet is bad, dump it from RX SRAM */
         for (i = 0; i < rx_len; i += 2)
         {
            usleep(STD_DELAY);
            tmp = IORD(dm9ka->regbase, IO_data);
         }
         rx_len = 0;
      }
      usleep(STD_DELAY);
      rx_rdy = dm9000a_rxReady(dm9ka);
      usleep(STD_DELAY);
   }
/* igmp_send() - build and transmit an IGMP message of the given type
 * for the multicast group described by inm. Version 2 Membership
 * Report (0x16) and Leave Group (0x17) messages carry an IP Router
 * Alert option; Leave Group is addressed to the all-routers group,
 * everything else to the group address itself. The packet is handed
 * to ip_write2(), which owns it afterwards; igmpstats counters are
 * updated. Returns nothing; an allocation failure is counted in
 * igmp_pkt_alloc_fail and the send is skipped.
 */
void
igmp_send (u_char type, struct in_multi * inm)
{
   PACKET p;
   struct igmp * igmp;
   struct ip_moptions * imop;
   struct ip_moptions simo;
   struct ip * pip;
   u_char * tmpp;
   u_char opts [2] = {IP_RTR_ALERT_OPT, EOL_OPT};
   u_char * optp;
   unsigned reqd_len;   /* FIX: was u_char - MaxLnh + IP + IGMP headers
                         * (+ option) could exceed 255 on media with
                         * large link headers, silently truncating the
                         * allocation size */

   /* compute length of buffer required for outgoing packet.
    * also account for the length of the IP Router Alert
    * option, if required.
    */
   reqd_len = MaxLnh + sizeof (struct ip) + sizeof (struct igmp);
   if ((type == IGMPv2_LEAVE_GROUP) || (type == IGMPv2_MEMBERSHIP_REPORT))
      reqd_len += IP_RTR_ALERT_OPT_SIZE;

   /* obtain a packet to send the IGMP message */
   LOCK_NET_RESOURCE (FREEQ_RESID);
   p = pk_alloc (reqd_len);
   UNLOCK_NET_RESOURCE (FREEQ_RESID);

   /* log an error and return if the allocation fails */
   if (!p)
   {
      ++igmpstats.igmp_pkt_alloc_fail;
      return;
   }

   /* Need to fill in the source and destination ip addresses */
   pip = (struct ip *) p->nb_prot;
   pip->ip_src = inm->inm_netp->n_ipaddr;

   /* Leave Group messages are sent to the all-routers multicast group */
   if (type == IGMPv2_LEAVE_GROUP)
   {
      /* igmp_all_rtrs_group is already in network byte order */
      pip->ip_dest = igmp_all_rtrs_group;
   }
   else
      pip->ip_dest = inm->inm_addr;

   p->fhost = pip->ip_dest;

   tmpp = (((u_char *) p->nb_prot) + sizeof (struct ip));

   /* when transmitting an IGMP packet, our IGMP module will insert
    * data for the Router Alert option in the following types of
    * packets: Version 2 Membership Report (0x16) and Leave Group
    * (0x17)
    */
   if ((type == IGMPv2_LEAVE_GROUP) || (type == IGMPv2_MEMBERSHIP_REPORT))
   {
      /* provide space for ip_write2 () to write option-related data */
      tmpp += IP_RTR_ALERT_OPT_SIZE;
      optp = &(opts [0]);   /* one option (IP Router Alert) */
   }
   else   /* outgoing packet does not require any options */
      optp = &(opts [1]);

   /* point to the start of the IGMP header and fill it in */
   igmp = (struct igmp *) tmpp;
   igmp->igmp_type = type;
   igmp->igmp_code = 0;

   /* all messages (Report or Leave) have Group Address field
    * set to the group being reported or left
    */
   igmp->igmp_group = inm->inm_addr;
   igmp->igmp_cksum = 0;
   igmp->igmp_cksum = ~cksum((void*)igmp, IGMP_MINLEN>>1);

   /* attach multicast options: send on the group's interface with
    * TTL 1; we do not want our own reports to be looped back
    */
   imop = &simo;
   MEMSET(imop, 0, sizeof(simo));
   imop->imo_multicast_netp = inm->inm_netp;
   imop->imo_multicast_ttl = 1;
   imop->imo_multicast_loop = 0;

   /* set nb_prot to point to the beginning of the IGMP data,
    * and nb_plen to the length of the IGMP data, and attach
    * the multicast options structure to the outgoing packet
    */
   p->nb_prot = (char *) tmpp;
   p->nb_plen = sizeof(struct igmp);
   p->imo = imop;

   /* NOTE(review): ip_write2()'s return value has always been ignored
    * here; the counters below are bumped whether or not the send
    * succeeded. Preserved as-is. */
   (void)ip_write2 (IGMP_PROT, p, optp);

   if (type == IGMPv2_LEAVE_GROUP)
      ++igmpstats.igmpv2mode_v2_leave_msgs_sent;
   else if (type == IGMPv2_MEMBERSHIP_REPORT)
      ++igmpstats.igmpv2mode_v2_reports_sent;
   else if (type == IGMP_HOST_MEMBERSHIP_REPORT)
      ++igmpstats.igmp_v1_reports_sent;
}
/* icmpEcho() - build and send an ICMP Echo Request ("ping").
 *
 * host -    host to ping - 32 bit, network-endian
 * data -    ping payload, or NULL to use the default pingdata pattern
 * datalen - length of payload to attach to the ping request
 * pingseq - ping sequence number
 *
 * Returns 0 if the request was handed to the IP layer OK (a positive
 * ip_write() return, meaning an ARP is pending, also counts as
 * success), or a negative ENP_ error code.
 */
int
icmpEcho(ip_addr host, char * data, unsigned datalen, unshort pingseq)
{
   PACKET p;
   int ip_err;
   struct ping * e;
   struct ip * pip;

   /* FIX: allocate one extra byte so the checksum pad byte written
    * below for odd datalen stays inside the buffer - the original
    * pk_alloc(PINGHDRSLEN + datalen) left the pad write one byte past
    * the end of the allocation. */
   LOCK_NET_RESOURCE(FREEQ_RESID);
   p = pk_alloc(PINGHDRSLEN + datalen + 1);
   UNLOCK_NET_RESOURCE(FREEQ_RESID);
   if (!p)
   {
#ifdef NPDEBUG
      if (NDEBUG & IPTRACE)
         dprintf("icmp: can't alloc packet\n");
#endif
      return(ENP_NOBUFFER);
   }

   p->nb_prot = p->nb_buff + PINGHDRSLEN;
   p->nb_plen = datalen;
   p->fhost = host;
   if (host == 0xFFFFFFFF)    /* broadcast? */
      p->net = nets[0];       /* then use first iface */

   /* copy in data field */
   if (data)
   {
      MEMCPY(p->nb_prot, data, datalen);
   }
   else   /* caller didn't specify data - fill with default pattern */
   {
      unsigned donedata = (unsigned)strlen(pingdata);

      /* FIX: bound the pattern copy by datalen - the original
       * strcpy() always wrote the whole pingdata string (plus NUL),
       * overflowing the payload area whenever datalen was smaller
       * than strlen(pingdata). */
      if (donedata > datalen)
         donedata = datalen;
      MEMCPY(p->nb_prot, pingdata, donedata);

      /* pad any remainder with a rolling byte pattern */
      while (donedata < datalen)
      {
         *(p->nb_prot + donedata) = (char)((donedata) & 0x00FF);
         donedata++;
      }
   }

   /* adjust packet pointers to icmp ping header */
   p->nb_prot -= sizeof(struct ping);
   p->nb_plen += sizeof(struct ping);

   /* fill in icmp ping header */
   e = (struct ping *)p->nb_prot;
   e->ptype = ECHOREQ;
   e->pcode = 0;
   e->pid = 0;
   e->pseq = pingseq;

   /* Calculate the checksum */
   e->pchksum = 0;
   if (datalen & 1)  /* if data size is odd, pad with a zero */
      *((char*)(e+1) + datalen) = 0;
   e->pchksum = ~cksum(e, (ICMPSIZE+datalen+1)>>1);

   /* need to fill in IP addresses at this layer too */
   pip = (struct ip *)(p->nb_prot - sizeof(struct ip));
   pip->ip_src = ip_mymach(host);
   pip->ip_dest = host;

   LOCK_NET_RESOURCE(NET_RESID);
   ip_err = ip_write(ICMP_PROT, p);    /* send down to IP layer */
   UNLOCK_NET_RESOURCE(NET_RESID);

   /* Errors are negative. A zero means send was OK. a positive number
    * usually means we had to ARP. Assume this will work and count a send.
    */
   if (ip_err < 0)
   {
#ifdef NPDEBUG
      if (NDEBUG & NETERR)
         dprintf("icmp: can't send echo request\n");
#endif
      /* rfc 1156 seems to say not to count these. (pg 48) -JB- */
      /* LOCK_NET_RESOURCE(FREEQ_RESID); */
      /* pk_free(p); */
      /* UNLOCK_NET_RESOURCE(FREEQ_RESID); */
      return(ip_err);
   }

   /* fall to here if we sent echo request OK */
   icmp_mib.icmpOutMsgs++;
   icmp_mib.icmpOutEchos++;
   return(0);
}
int lb_raw_send(struct net * netp, char * buffer, unsigned length) { struct ethhdr * eth; IFMIB mib; PACKET pkt; #ifdef NPDEBUG /* Sanity check interface pointer */ if(netp->raw_send != lb_raw_send) { dprintf("macloop: bad net\n"); dtrap(); } #endif /* Don't send if iface is logically down */ if(netp->n_mib->ifAdminStatus != NI_UP) { netp->n_mib->ifOutDiscards++; /* bump mib counter for these */ return ENP_LOGIC; /* right thing to do? */ } /* maintain mib xmit stats */ mib = netp->n_mib; if (*buffer & 0x01) /* see if multicast bit is on */ mib->ifOutNUcastPkts++; else mib->ifOutUcastPkts++; mib->ifOutOctets += length; /* at this point we make the logical switch from sending to receiving */ /* fill in a packet for the "received" buffer */ LOCK_NET_RESOURCE(FREEQ_RESID); pkt = pk_alloc(length); UNLOCK_NET_RESOURCE(FREEQ_RESID); if (!pkt) { mib->ifInDiscards++; return ENP_RESOURCE; } MEMCPY(pkt->nb_buff, buffer, length); pkt->nb_prot = pkt->nb_buff + ETHHDR_SIZE; /* point to IP header */ pkt->nb_plen = length - ETHHDR_SIZE; /* IP length */ pkt->net = netp; eth = (struct ethhdr *)(pkt->nb_buff + ETHHDR_BIAS); MEMCPY(eth->e_dst, (void *)lpbhaddr, 6); MEMCPY(eth->e_src, (void *)lpbhaddr, 6); pkt->type = eth->e_type; mib->ifInOctets += length; /* queue the packet in rcvdq */ putq(&rcvdq, (q_elt)pkt); /* Most ports should now wake the packet demuxer task */ SignalPktDemux(); return 0; /* OK return */ }
/* ip_output() - TCP's path down to the IP layer. Coalesces the mbuf
 * chain at data into a single contiguous packet buffer (using append,
 * prepend, or brute-force copy, in that order of preference), fills in
 * enough IP header for the TCP checksum, computes th_sum, and hands the
 * packet to ip_write(). The mbuf chain is always consumed: the packet
 * is transferred to ip_write() and the mbufs are freed here.
 * Returns 0 on success (including SEND_DROPPED, which a TCP retry will
 * fix), ENOBUFS if a coalescing allocation failed, or a negative
 * ip_write() error.
 */
int
ip_output(struct mbuf * data, struct ip_socopts * so_optsPack)   /* mbuf chain with data to send */
{
   struct ip * bip;
   struct tcphdr * tcpp;
   PACKET pkt;
   struct mbuf * m1, * m2, * mtmp;   /* temp mbuf pointers */
   int e;      /* error holder */
   int total;

   /* reassemble mbufs into a contiguous packet. Do this with as
    * little copying as possible. Typically the mbufs will be either
    * 1) a single mbuf with iptcp header info only (e.g.tcp ACK
    * packet), or 2) iptcp header with data mbuf chained to it, or 3)
    * #2) with a tiny option data mbuf between header and data.
    */
   if ((data->m_next))
   {
      m1 = data;
      m2 = data->m_next;

      /* If m2 is small (e.g. options), copy it to m1 and free it */
      while (m2 && (m2->m_len < 10))
      {
         pkt = m1->pkt;
         if ((pkt->nb_buff + pkt->nb_blen) >    /* make sure m2 will fit in m1 */
             (m1->m_data + m1->m_len + m2->m_len))
         {
            MEMCPY((m1->m_data + m1->m_len), m2->m_data, m2->m_len);
            m1->m_len += m2->m_len;
            m1->m_next = m2->m_next;
            m_free(m2);          /* free this m2.... */
            m2 = m1->m_next;     /* ...and thread the next one */
            tcpstat.tcps_oappends++;
         }
         else     /* if won't fit, fall to next copy */
            break;
      }

      while (m2)   /* If we still have two or more buffers, more copying: */
      {
         /* try prepending m1 to m2, first see if it fits: */
         e = m2->m_data - m2->pkt->nb_buff;     /* e is prepend space */
         if (e < MaxLnh)
         {
#ifdef NPDEBUG
            dprintf("nptcp: MaxLnh:%d, e:%d\n", MaxLnh, e);
#endif
            panic("tcp_out:mbuf-nbuf");   /* sanity check */
         }
         if ((m1->m_len < (unsigned)(e - MaxLnh))       /* leave room for MAC */
             && ((m1->m_len & (ALIGN_TYPE - 1)) == 0)   /* and stay aligned */
             && ((m2->m_data - m2->pkt->nb_buff) == HDRSLEN))   /* be at start */
         {
            MEMCPY((m2->m_data - m1->m_len), m1->m_data, m1->m_len);
            m2->m_data -= m1->m_len;    /* fix target to reflect prepend */
            m2->m_len += m1->m_len;
            m_free(m1);          /* free head (copied) mbuf */
            data = m1 = m2;      /* move other mbufs up the chain */
            m2 = m2->m_next;     /* loop to while(m2) test */
            tcpstat.tcps_oprepends++;
         }
         else     /* if won't fit, fall to next copy */
            break;
      }

      if (m2)   /* If all else fails, brute force copy: */
      {
         /* sum the remaining chain, copy it all into one new packet */
         total = 0;
         for (mtmp = m1; mtmp; mtmp = mtmp->m_next)
            total += mtmp->m_len;
         LOCK_NET_RESOURCE(FREEQ_RESID);
         pkt = pk_alloc(total + HDRSLEN);
         UNLOCK_NET_RESOURCE(FREEQ_RESID);
         if (!pkt)
            return ENOBUFS;
         pkt->nb_prot = pkt->nb_buff + MaxLnh;
         mtmp = m1;
         while (mtmp)
         {
            MEMCPY(pkt->nb_prot, mtmp->m_data, mtmp->m_len);
            pkt->nb_prot += mtmp->m_len;
            pkt->nb_plen += mtmp->m_len;
            m2 = mtmp;
            mtmp = mtmp->m_next;
            if (m2 != data)   /* save original head */
               m_free(m2);
            tcpstat.tcps_ocopies++;
         }
         pkt->nb_prot -= total;   /* fix data pointer */

         /* release the original mbufs packet install the new one */
         LOCK_NET_RESOURCE(FREEQ_RESID);
         pk_free(data->pkt);
         UNLOCK_NET_RESOURCE(FREEQ_RESID);
         data->pkt = pkt;
         data->m_len = pkt->nb_plen;
         data->m_next = NULL;
         data->m_data = pkt->nb_prot;
         data->m_len = total;   /* NOTE(review): redundant - m_len was
                                 * just set to pkt->nb_plen above; both
                                 * should equal total here */
      }
   }

   /* data must leave MaxLnh bytes of MAC header prepend space */
   if ((data->m_data < (data->pkt->nb_buff + MaxLnh)))
      panic("ip_output: overflow");

   pkt = data->pkt;

   /* do we have options? */
   if (so_optsPack)
      pkt->soxopts = so_optsPack;   /* yup */
#ifdef IP6_ROUTING
   else
   {
      panic("ip_output: no so_optsPack for the IPv6 scope");
   }
#endif

   /* fill in dest host for IP layer */
   bip = (struct ip *)data->m_data;
   pkt->fhost = bip->ip_dest;

   /* make enough IP header for cksum calculation */
   bip->ip_ver_ihl = 0x45;
   bip->ip_len = htons(bip->ip_len);   /* make net endian for calculation */
   tcpp = (struct tcphdr *)ip_data(bip);

#ifdef CSUM_DEMO
   if (!(tcpp->th_flags & TH_SYN))
      tcpp->th_flags |= TH_PUSH;   /* force the PSH flag in TCP hdr */
#endif
   tcpp->th_sum = tcp_cksum(bip);

   pkt->nb_prot = (char*)(bip + 1);    /* point past IP header */
   pkt->nb_plen = data->m_len - sizeof(struct ip);

   e = ip_write(IPPROTO_TCP, pkt);

   /* ip_write() is now responsable for data->pkt, so... */
   data->pkt = NULL;
   m_freem(data);

   if (e < 0)
   {
      /* don't report dropped sends, it causes socket applications
         to bail when a TCP retry will fix the problem */
      if (e == SEND_DROPPED)
         return 0;
      return e;
   }
   else
      return 0;
}
/* arp_flood() - test/diagnostic routine: send pktcount ARP requests for
 * activehost out the routed interface, rotating the sender IP address
 * (host portion derived from the loop counter) so every listener's ARP
 * table receives distinct entries. Progress dots and errors are
 * reported through ns_printf on the console at pio.
 * Returns 0 when all packets were sent, -1 on routing/wait/send
 * failure, or ENP_RESOURCE if a packet buffer allocation failed.
 */
int
arp_flood (void * pio)
{
   PACKET arppkt;
   struct ethhdr * ethhdr;
   struct arp_hdr * arphdr;
   NET net;
   long i;
   int e;
   ip_addr ipaddr;
#ifdef MULTI_HOMED
   ip_addr phost;    /* phoney host for pass to iproute */

   net = iproute(activehost, &phost);
#else
   net = nets[0];
#endif

   if (!net)
   {
      ns_printf(pio, "ARP flood: no route");
      return -1;
   }

   ns_printf(pio, "sending ARP flood of %ld pkts to %u.%u.%u.%u..",
      pktcount, PUSH_IPADDR(activehost) );

   for (i = 0; i < pktcount; i++)
   {
      if (pktwait("ARP", pio))
         return -1;

      /******** code cribbed from et_arp.c: ********/
      LOCK_NET_RESOURCE(FREEQ_RESID);
      arppkt = pk_alloc(arpsize);
      UNLOCK_NET_RESOURCE(FREEQ_RESID);
      if (!arppkt)
         return ENP_RESOURCE;

      arppkt->nb_prot = arppkt->nb_buff;
      arppkt->nb_plen = arpsize;
      arppkt->net = net;

      /* build arp request packet */
      ethhdr = (struct ethhdr *)(arppkt->nb_buff + ETHHDR_BIAS);   /* ethernet header at start of buffer */
      arphdr = (struct arp_hdr *)(arppkt->nb_buff + ETHHDR_SIZE);  /* arp header follows */
      arphdr->ar_hd = ARPHW;     /* net endian arp hardware type (ethernet) */
      arphdr->ar_pro = ARPIP;
      arphdr->ar_hln = 6;
      arphdr->ar_pln = 4;
      arphdr->ar_op = ARREQ;
      arphdr->ar_tpa = activehost;     /* target's IP address */

      /* FLOOD TEST MOD: just for grins, rotate our IP address so we
       * flood everybody's arp tables. Remember that we store IP
       * addresses
       */
      ipaddr = i & (0x00FFFFFE & htonl(~net->snmask));   /* make host portion */
      arphdr->ar_spa = (net->n_ipaddr | htonl(ipaddr));  /* add net portion */
      MEMCPY(arphdr->ar_sha, net->n_mib->ifPhysAddress, 6);

      MEMSET(&(ethhdr->e_dst[0]), 0xFF, 6);   /* destination to broadcast (all FFs) */
      MEMCPY(ethhdr->e_src, net->n_mib->ifPhysAddress, 6);
      ethhdr->e_type = ET_ARP;   /* 0x0806 - ARP type on ethernet */

#ifdef NO_CC_PACKING
      /* move ARP fields to proper network boundaries */
      {
         struct arp_wire * arwp = (struct arp_wire *)arphdr;
         MEMMOVE(&arwp->data[AR_SHA], arphdr->ar_sha, 6);
         MEMMOVE(&arwp->data[AR_SPA], &arphdr->ar_spa, 4);
         MEMMOVE(&arwp->data[AR_THA], arphdr->ar_tha, 6);
         MEMMOVE(&arwp->data[AR_TPA], &arphdr->ar_tpa, 4);
      }
#endif   /* NO_CC_PACKING */

      /* send arp request - if a packet oriented send exists, use it: */
      if (net->pkt_send)
         e = net->pkt_send(arppkt);    /* driver should free arppkt later */
      else   /* use old raw send */
      {
         e = net->raw_send(arppkt->net, arppkt->nb_buff, arpsize);
         LOCK_NET_RESOURCE(FREEQ_RESID);
         pk_free(arppkt);
         UNLOCK_NET_RESOURCE(FREEQ_RESID);
      }
      arpReqsOut++;
      /******** end of code cribbed from et_arp.c: ********/

      if (e < 0)
      {
         ns_printf(pio, "ARP flood send error %d on pkt %ld\n",e,i);
         return -1;
      }
      if ((i & 0x0f) == 0x0f)    /* progress dot every 16 packets */
         ns_printf(pio, ".");
   }
   return 0;
}