/* In red alert mode, blow away the whole reassembly queue. Otherwise crunch * each fragment on each reassembly descriptor */ void ip_garbage( int red ){ struct reasm *rp,*rp1; struct frag *fp; struct raw_ip *rwp; struct iface *ifp; /* Run through the reassembly queue */ for(rp = Reasmq;rp != NULL;rp = rp1){ rp1 = rp->next; if(red){ free_reasm(rp); } else { for(fp = rp->fraglist;fp != NULL;fp = fp->next){ mbuf_crunch(&fp->buf); } } } /* Run through the raw IP queue */ for(rwp = Raw_ip;rwp != NULL;rwp = rwp->next) mbuf_crunch(&rwp->rcvq); /* Walk through interface output queues and decrement IP TTLs. * Discard and return ICMP TTL exceeded messages for any that * go to zero. (Some argue that this ought to be done all the * time, but it would probably break a lot of machines with * small IP TTL settings using amateur packet radio paths.) * * Also send an ICMP source quench message to one * randomly chosen packet on each queue. If in red mode, * also drop the packet. */ for(ifp=Ifaces;ifp != NULL;ifp = ifp->next){ ttldec(ifp); rquench(ifp,red); } }
/* Reclaim memory held by the TCP layer; invoked by the storage allocator
 * when free space runs low. Every TCB's send and receive queues are
 * compacted. The out-of-order resequencing queue is discarded entirely
 * when the situation is red, and compacted otherwise.
 */
void
tcp_garbage(int red)
{
	struct tcb *cb;
	struct reseq *seg, *seg_next;

	for(cb = Tcbs; cb != NULL; cb = cb->next){
		mbuf_crunch(&cb->rcvq);
		mbuf_crunch(&cb->sndq);
		/* Capture the successor before a red-mode free(seg)
		 * invalidates the node we are iterating on.
		 */
		seg = cb->reseq;
		while(seg != NULL){
			seg_next = seg->next;
			if(red){
				free_p(&seg->bp);
				free(seg);
			} else {
				mbuf_crunch(&seg->bp);
			}
			seg = seg_next;
		}
		/* All resequencing entries were freed above; clear the head */
		if(red)
			cb->reseq = NULL;
	}
}