static void
destroy(isc__timer_t *timer) {
    isc__timermgr_t *manager = timer->manager;

    /*
     * The caller must ensure it is safe to destroy the timer.
     */

    LOCK(&manager->lock);

    (void)isc_task_purgerange(timer->task,
                              timer,
                              ISC_TIMEREVENT_FIRSTEVENT,
                              ISC_TIMEREVENT_LASTEVENT,
                              NULL);
    deschedule(timer);
    UNLINK(manager->timers, timer, link);

    UNLOCK(&manager->lock);

    isc_task_detach(&timer->task);
    DESTROYLOCK(&timer->lock);
    timer->common.impmagic = 0;
    timer->common.magic = 0;
    isc_mem_put(manager->mctx, timer, sizeof(*timer));
}
void cl_copyBuffer(cl_mem dest, int destOffset, cl_mem src, int srcOffset,
                   size_t size, int *index, cl_event *eventList,
                   int *Flag_CPU_GPU, double *burden, int _CPU_GPU)
{
    int preFlag = (*Flag_CPU_GPU);
    double preBurden = (*burden);
    int CPU_GPU = cl_copyBufferscheduler(size, Flag_CPU_GPU, burden, _CPU_GPU);
    cl_int ciErr1;

    (*Flag_CPU_GPU) = CPU_GPU;
    if (*index != 0)
    {
        /* Chain this copy after the previously enqueued command by waiting
         * on the other slot of the two-entry event list, then release the
         * reservation made for the previous device choice. */
        ciErr1 = clEnqueueCopyBuffer(CommandQueue[CPU_GPU], src, dest,
                                     srcOffset, destOffset, size,
                                     1, &eventList[((*index) - 1) % 2],
                                     &eventList[(*index) % 2]);
        deschedule(preFlag, preBurden);
    }
    else
    {
        /* First command in the chain: nothing to wait on. */
        ciErr1 = clEnqueueCopyBuffer(CommandQueue[CPU_GPU], src, dest,
                                     srcOffset, destOffset, size,
                                     0, NULL, &eventList[*index]);
    }
    (*index)++;
    //clEnqueueWriteBuffer(CommandQueue[CPU_GPU], to, CL_FALSE, 0, size, from, 0, NULL, NULL);
    if (ciErr1 != CL_SUCCESS)
    {
        printf("Error %d in cl_copyBuffer, Line %u in file %s !!!\n\n",
               ciErr1, __LINE__, __FILE__);
        cl_clean(EXIT_FAILURE);
    }
    clFlush(CommandQueue[CPU_GPU]);
}
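/*
 * Illustrative sketch (not part of the original source): how the running
 * index and two-entry eventList are typically threaded through successive
 * cl_copyBuffer calls so each copy waits on its predecessor. The buffers
 * d_a, d_b, d_c and this helper are hypothetical; the wait/deschedule/
 * release tail mirrors the pattern in CL_getValueList below.
 */
void copy_chain_example(cl_mem d_a, cl_mem d_b, cl_mem d_c, size_t size)
{
    cl_event eventList[2];
    int index = 0;
    int Flag_CPU_GPU = 0;
    double burden = 0.0;

    /* The first copy seeds eventList[0]; the second waits on it and
     * signals eventList[1]. */
    cl_copyBuffer(d_b, 0, d_a, 0, size, &index, eventList,
                  &Flag_CPU_GPU, &burden, 0);
    cl_copyBuffer(d_c, 0, d_b, 0, size, &index, eventList,
                  &Flag_CPU_GPU, &burden, 0);

    /* Wait on the most recently signalled slot, then release the device
     * reservation and the events. */
    clWaitForEvents(1, &eventList[(index - 1) % 2]);
    deschedule(Flag_CPU_GPU, burden);
    clReleaseEvent(eventList[0]);
    clReleaseEvent(eventList[1]);
}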
extern "C" void CL_getValueList(cl_mem h_Rin, int rLen, cl_mem *h_ValueList,
                                int numThreadPB, int numBlock, int _CPU_GPU)
{
    cl_event eventList[2];
    int index = 0;
    cl_kernel Kernel;
    int CPU_GPU;
    double burden;

    int outputSize = sizeof(int) * rLen;
    CL_MALLOC(h_ValueList, outputSize);
    cl_mem d_tempOutput;
    CL_MALLOC(&d_tempOutput, outputSize);

    int numThread = numThreadPB * numBlock;
    dim3 thread(numThreadPB, 1, 1);
    dim3 grid(numBlock, 1, 1);

    // getValueList_kernel(Record *d_R, int delta, int rLen, int *d_ValueList, int *d_output)
    // getValueList_kernel<<<grid,thread>>>(d_Rin, numThread, rLen, *d_ValueList, d_tempOutput);
    getValueListImpl(h_Rin, numThread, rLen, h_ValueList, d_tempOutput,
                     numThreadPB, numBlock, &index, eventList, &Kernel,
                     &CPU_GPU, &burden, _CPU_GPU);
    clWaitForEvents(1, &eventList[(index - 1) % 2]);
    deschedule(CPU_GPU, burden);
    clReleaseKernel(Kernel);
    clReleaseEvent(eventList[0]);
    CL_FREE(d_tempOutput);
    //bufferchecking(*h_ValueList, sizeof(Record) * 1);
}
extern "C" void CL_setRIDList(cl_mem h_RIDList, int rLen, cl_mem h_destRin,
                              int numThreadPB, int numBlock, int _CPU_GPU)
{
    cl_event eventList[2];
    int index = 0;
    cl_kernel Kernel;
    int CPU_GPU;
    double burden;

    int outputSize = sizeof(int) * rLen;
    cl_mem d_tempOutput;
    CL_MALLOC(&d_tempOutput, outputSize);

    int numThread = numThreadPB * numBlock;
    dim3 thread(numThreadPB, 1, 1);
    dim3 grid(numBlock, 1, 1);

    setRIDListImpl(h_RIDList, d_tempOutput, numThread, rLen, h_destRin,
                   numThreadPB, numBlock, &index, eventList, &Kernel,
                   &CPU_GPU, &burden, _CPU_GPU);
    clWaitForEvents(1, &eventList[(index - 1) % 2]);
    deschedule(CPU_GPU, burden);
    clReleaseKernel(Kernel);
    clReleaseEvent(eventList[0]);
    CL_FREE(d_tempOutput);
    //bufferchecking(h_destRin, sizeof(Record) * 1);
}
extern "C" void CL_setValueList(cl_mem h_ValueList, int rLen, cl_mem h_destRin,
                                int numThreadPB, int numBlock, int _CPU_GPU)
{
    cl_event eventList[2];
    int index = 0;
    cl_kernel Kernel;
    int CPU_GPU;
    double burden;

    int numThread = numThreadPB * numBlock;
    dim3 thread(numThreadPB, 1, 1);
    dim3 grid(numBlock, 1, 1);

    setValueListImpl(h_ValueList, numThread, rLen, h_destRin, numThreadPB,
                     numBlock, &index, eventList, &Kernel, &CPU_GPU,
                     &burden, _CPU_GPU);
    clWaitForEvents(1, &eventList[(index - 1) % 2]);
    deschedule(CPU_GPU, burden);
    clReleaseKernel(Kernel);
    clReleaseEvent(eventList[0]);
    //bufferchecking(h_destRin, sizeof(Record) * 1);
}
/** @brief Function that implements the wait for the condition variable
 *
 * Put the thread in the waiting list of the condition variable and
 * block the thread until it receives a signal to wake up.
 *
 * @param cv condition variable that is waited on
 * @param mp the mutex held by the caller, re-acquired when signalled
 * @return void
 */
void cond_wait (cond_t *cv, mutex_t *mp)
{
    int status = 0;

    assert (cv != NULL);
    assert (mp != NULL);

    /* If the condition variable has been destroyed, do nothing and return */
    if (cv->status == COND_DESTORY)
    {
        lprintf ("The cond_variable has been destroyed already");
        return;
    }

    /* Allocate a queue node to record this thread on the waiting list */
    cond_t *node_ptr = malloc (sizeof (cond_t));

    /* Initialize the node with this thread's tid and prepare to insert */
    Q_INIT_ELEM (node_ptr, cond_link);
    node_ptr->tid = gettid ();

    /* Lock the queue to store the thread info */
    spin_lock_request (&cv->spinlock);

    /* Insert at the tail of the waiting list */
    Q_INSERT_TAIL (&cv->waiting_list, node_ptr, cond_link);

    /* Release the mutex mp passed in */
    mutex_unlock (mp);

    /* Release the lock of the queue */
    spin_lock_release (&cv->spinlock);

    /* Deschedule this thread; a signaller will make it runnable again */
    deschedule (&status);

    /* Free the queue node to avoid leaking one allocation per wait; this
     * assumes the signaller dequeues the node before waking this thread
     * and keeps no reference to it afterwards */
    free (node_ptr);

    /* Re-acquire the passed mutex mp */
    mutex_lock (mp);
}
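/*
 * Illustrative usage sketch (not from the original source): the standard
 * condition-variable pattern with this library's mutex_t/cond_t types.
 * The cond_signal call, the 'ready' flag, and prior initialization of
 * 'lock' and 'cond' are assumptions here; only cond_wait appears above.
 */
static mutex_t lock;
static cond_t cond;
static int ready = 0;

void consumer (void)
{
    mutex_lock (&lock);
    /* Always re-check the predicate in a loop: the thread may be woken
     * after another consumer has already consumed the event. */
    while (!ready)
        cond_wait (&cond, &lock);
    ready = 0;
    mutex_unlock (&lock);
}

void producer (void)
{
    mutex_lock (&lock);
    ready = 1;
    cond_signal (&cond);
    mutex_unlock (&lock);
}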
void testScanImpl(int rLen)
{
    int _CPU_GPU = 0;
    cl_event eventList[2];
    int index = 0;
    cl_kernel Kernel;
    int CPU_GPU;
    double burden;
    int result = 0;
    int memSize = sizeof(int) * rLen;
    int outSize = sizeof(int) * rLen;

    void *Rin;
    HOST_MALLOC(Rin, memSize);
    generateRandInt((int *)Rin, rLen, rLen, 0);
    void *Rout;
    HOST_MALLOC(Rout, outSize);

    cl_mem d_Rin;
    CL_MALLOC(&d_Rin, memSize);
    cl_mem d_Rout;
    CL_MALLOC(&d_Rout, outSize);

    cl_writebuffer(d_Rin, Rin, memSize, &index, eventList, &CPU_GPU,
                   &burden, _CPU_GPU);

    ScanPara *SP = (ScanPara *)malloc(sizeof(ScanPara));
    initScan(rLen, SP);
    scanImpl(d_Rin, rLen, d_Rout, &index, eventList, &Kernel, &CPU_GPU,
             &burden, SP, _CPU_GPU);
    cl_readbuffer(Rout, d_Rout, outSize, &index, eventList, &CPU_GPU,
                  &burden, _CPU_GPU);
    clWaitForEvents(1, &eventList[(index - 1) % 2]);
    closeScan(SP);
    deschedule(CPU_GPU, burden);
    //validateScan( (int*)Rin, rLen, (int*)Rout );

    HOST_FREE(Rin);
    HOST_FREE(Rout);
    CL_FREE(d_Rin);
    CL_FREE(d_Rout);
    clReleaseKernel(Kernel);
    clReleaseEvent(eventList[0]);
    clReleaseEvent(eventList[1]);
}
extern "C" int CL_GroupBy(Record *h_Rin, int rLen, Record *h_Rout,
                          int **h_startPos, int numThread, int numBlock,
                          int _CPU_GPU)
{
    cl_mem d_Rin;
    cl_mem d_Rout;
    cl_mem d_startPos;
    cl_event eventList[2];
    int index = 0;
    cl_kernel Kernel;
    int CPU_GPU;
    double burden;

    int memSize = sizeof(Record) * rLen;
    CL_MALLOC(&d_Rin, memSize);
    CL_MALLOC(&d_Rout, memSize);
    cl_writebuffer(d_Rin, h_Rin, memSize, &index, eventList, &CPU_GPU,
                   &burden, _CPU_GPU);

    int numGroup = groupByImpl(d_Rin, rLen, d_Rout, &d_startPos, numThread,
                               numBlock, &index, eventList, &Kernel,
                               &CPU_GPU, &burden, _CPU_GPU);

    (*h_startPos) = (int *)malloc(sizeof(int) * numGroup);
    cl_readbuffer(*h_startPos, d_startPos, sizeof(int) * numGroup, &index,
                  eventList, &CPU_GPU, &burden, _CPU_GPU);
    cl_readbuffer(h_Rout, d_Rout, sizeof(Record) * rLen, &index, eventList,
                  &CPU_GPU, &burden, _CPU_GPU);
    clWaitForEvents(1, &eventList[(index - 1) % 2]);
    deschedule(CPU_GPU, burden);

    CL_FREE(d_Rin);
    CL_FREE(d_Rout);
    CL_FREE(d_startPos);
    clReleaseKernel(Kernel);
    clReleaseEvent(eventList[0]);
    clReleaseEvent(eventList[1]);
    printf("CL_GroupBy\n");
    return numGroup;
}
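/*
 * Illustrative sketch (not part of the original source): a minimal host-side
 * call to CL_GroupBy. The input array, the 256-thread/1024-block launch
 * shape, and this helper are assumptions for the example.
 */
void groupby_example(Record *h_Rin, int rLen)
{
    Record *h_Rout = (Record *)malloc(sizeof(Record) * rLen);
    int *h_startPos = NULL;

    /* Returns the number of groups; h_startPos is allocated inside
     * CL_GroupBy and holds the start offset of each group in h_Rout. */
    int numGroup = CL_GroupBy(h_Rin, rLen, h_Rout, &h_startPos,
                              256, 1024, 0);

    for (int g = 0; g < numGroup; g++)
        printf("group %d starts at %d\n", g, h_startPos[g]);

    free(h_startPos);
    free(h_Rout);
}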
void network_thread ()
{
    /*
     * We loop forever waiting on either data from the ppp drivers or from
     * our network socket.  Control handling is no longer done here.
     */
    struct sockaddr_in from;
    struct in_pktinfo to;
    unsigned int fromlen;
    int tunnel, call;           /* Tunnel and call */
    int recvsize;               /* Length of data received */
    struct buffer *buf;         /* Payload buffer */
    struct call *c, *sc;        /* Call to send this off to */
    struct tunnel *st;          /* Tunnel */
    fd_set readfds;             /* Descriptors to watch for reading */
    int max;                    /* Highest fd */
    struct timeval tv, *ptv;    /* Timeout for select */
    struct msghdr msgh;
    struct iovec iov;
    char cbuf[256];
    unsigned int refme, refhim;
    int *currentfd;
    int server_socket_processed;

#ifdef HIGH_PRIO
    /* set high priority */
    if (setpriority(PRIO_PROCESS, 0, -20) < 0)
        l2tp_log (LOG_INFO, "xl2tpd: can't set priority to high: %m");
#endif

    /* This one buffer can be recycled for everything except control packets */
    buf = new_buf (MAX_RECV_SIZE);

    tunnel = 0;
    call = 0;

    for (;;)
    {
        int ret;
        process_signal();
        max = build_fdset (&readfds);
        ptv = process_schedule(&tv);
        ret = select (max + 1, &readfds, NULL, NULL, ptv);
        if (ret <= 0)
        {
#ifdef DEBUG_MORE
            if (ret == 0)
            {
                if (gconfig.debug_network)
                    l2tp_log (LOG_DEBUG, "%s: select timeout\n", __FUNCTION__);
            }
            else
            {
                if (gconfig.debug_network)
                    l2tp_log (LOG_DEBUG, "%s: select returned error %d (%s)\n",
                              __FUNCTION__, errno, strerror (errno));
            }
#endif
            continue;
        }
        if (FD_ISSET (control_fd, &readfds))
        {
            do_control ();
        }
        server_socket_processed = 0;
        currentfd = NULL;
        st = tunnels.head;
        while (st || !server_socket_processed)
        {
            if (st && (st->udp_fd == -1))
            {
                st = st->next;
                continue;
            }
            if (st)
            {
                currentfd = &st->udp_fd;
            }
            else
            {
                currentfd = &server_socket;
                server_socket_processed = 1;
            }
            if (FD_ISSET (*currentfd, &readfds))
            {
                /*
                 * Okay, now we're ready for reading and processing new data.
                 */
                recycle_buf (buf);

                /* Reserve space for expanding payload packet headers */
                buf->start += PAYLOAD_BUF;
                buf->len -= PAYLOAD_BUF;

                memset(&from, 0, sizeof(from));
                memset(&to, 0, sizeof(to));
                fromlen = sizeof(from);

                memset(&msgh, 0, sizeof(struct msghdr));
                iov.iov_base = buf->start;
                iov.iov_len = buf->len;
                msgh.msg_control = cbuf;
                msgh.msg_controllen = sizeof(cbuf);
                msgh.msg_name = &from;
                msgh.msg_namelen = fromlen;
                msgh.msg_iov = &iov;
                msgh.msg_iovlen = 1;
                msgh.msg_flags = 0;

                /* Receive one packet. */
                recvsize = recvmsg(*currentfd, &msgh, 0);

                if (recvsize < MIN_PAYLOAD_HDR_LEN)
                {
                    if (recvsize < 0)
                    {
                        if (errno == ECONNREFUSED)
                        {
                            close(*currentfd);
                        }
                        if ((errno == ECONNREFUSED) || (errno == EBADF))
                        {
                            *currentfd = -1;
                        }
                        if (errno != EAGAIN)
                            l2tp_log (LOG_WARNING,
                                      "%s: recvfrom returned error %d (%s)\n",
                                      __FUNCTION__, errno, strerror (errno));
                    }
                    else
                    {
                        l2tp_log (LOG_WARNING, "%s: received too small a packet\n",
                                  __FUNCTION__);
                    }
                    if (st) st = st->next;
                    continue;
                }

                refme = refhim = 0;

                struct cmsghdr *cmsg;
                /* Process auxiliary received data in msgh */
                for (cmsg = CMSG_FIRSTHDR(&msgh);
                     cmsg != NULL;
                     cmsg = CMSG_NXTHDR(&msgh, cmsg))
                {
                    /* extract destination (our) addr */
                    if (cmsg->cmsg_level == IPPROTO_IP && cmsg->cmsg_type == IP_PKTINFO)
                    {
                        struct in_pktinfo *pktInfo = ((struct in_pktinfo *)CMSG_DATA(cmsg));
                        to = *pktInfo;
                    }
                    /* extract IPsec info out */
                    else if (gconfig.ipsecsaref && cmsg->cmsg_level == IPPROTO_IP
                             && cmsg->cmsg_type == gconfig.sarefnum)
                    {
                        unsigned int *refp;
                        refp = (unsigned int *)CMSG_DATA(cmsg);
                        refme = refp[0];
                        refhim = refp[1];
                    }
                }

                /*
                 * some logic could be added here to verify that we only
                 * get L2TP packets inside of IPsec, or to provide different
                 * classes of service to packets not inside of IPsec.
                 */
                buf->len = recvsize;
                fix_hdr (buf->start);
                extract (buf->start, &tunnel, &call);

                if (gconfig.debug_network)
                {
                    l2tp_log (LOG_DEBUG,
                              "%s: recv packet from %s, size = %d, "
                              "tunnel = %d, call = %d ref=%u refhim=%u\n",
                              __FUNCTION__, inet_ntoa (from.sin_addr),
                              recvsize, tunnel, call, refme, refhim);
                }

                if (gconfig.packet_dump)
                {
                    do_packet_dump (buf);
                }

                if (!(c = get_call (tunnel, call, from.sin_addr,
                                    from.sin_port, refme, refhim)))
                {
                    if ((c = get_tunnel (tunnel, from.sin_addr.s_addr, from.sin_port)))
                    {
                        /*
                         * It is theoretically possible that we could be sent
                         * a control message (say a StopCCN) on a call that we
                         * have already closed or some such nonsense.  To
                         * prevent this from closing the tunnel, if we get a
                         * call on a valid tunnel, but not with a valid CID,
                         * we'll just send a ZLB to ack receiving the packet.
                         */
                        if (gconfig.debug_tunnel)
                            l2tp_log (LOG_DEBUG,
                                      "%s: no such call %d on tunnel %d.  Sending special ZLB\n",
                                      __FUNCTION__, call, tunnel);
                        if (handle_special (buf, c, call) == 0)
                            /* get a new buffer */
                            buf = new_buf (MAX_RECV_SIZE);
                    }
#ifdef DEBUG_MORE
                    else
                    {
                        l2tp_log (LOG_DEBUG,
                                  "%s: unable to find call or tunnel to handle packet.  call = %d, tunnel = %d Dumping.\n",
                                  __FUNCTION__, call, tunnel);
                    }
#endif
                }
                else
                {
                    if (c->container)
                    {
                        c->container->my_addr = to;
                    }

                    buf->peer = from;
                    /* Handle the packet */
                    c->container->chal_us.vector = NULL;
                    if (handle_packet (buf, c->container, c))
                    {
                        if (gconfig.debug_tunnel)
                            l2tp_log (LOG_DEBUG, "%s: bad packet\n", __FUNCTION__);
                    }
                    if (c->cnu)
                    {
                        /* Send Zero Byte Packet */
                        control_zlb (buf, c->container, c);
                        c->cnu = 0;
                    }
                }
            }
            if (st) st = st->next;
        }

        /*
         * finished obvious sources, look for data from PPP connections.
         */
        st = tunnels.head;
        while (st)
        {
            sc = st->call_head;
            while (sc)
            {
                if ((sc->fd >= 0) && FD_ISSET (sc->fd, &readfds))
                {
                    /* Got some payload to send */
                    int result;

                    while ((result = read_packet (sc)) > 0)
                    {
                        add_payload_hdr (sc->container, sc, sc->ppp_buf);
                        if (gconfig.packet_dump)
                        {
                            do_packet_dump (sc->ppp_buf);
                        }
                        sc->prx = sc->data_rec_seq_num;
                        if (sc->zlb_xmit)
                        {
                            deschedule (sc->zlb_xmit);
                            sc->zlb_xmit = NULL;
                        }
                        sc->tx_bytes += sc->ppp_buf->len;
                        sc->tx_pkts++;
                        udp_xmit (sc->ppp_buf, st);
                        recycle_payload (sc->ppp_buf, sc->container->peer);
                    }
                    if (result != 0)
                    {
                        l2tp_log (LOG_WARNING,
                                  "%s: tossing read packet, error = %s (%d).  Closing call.\n",
                                  __FUNCTION__, strerror (-result), -result);
                        strcpy (sc->errormsg, strerror (-result));
                        sc->needclose = -1;
                    }
                }
                sc = sc->next;
            }
            st = st->next;
        }
    }
}
void destroy_call (struct call *c)
{
    /*
     * Here, we unconditionally destroy a call.
     */
    struct call *p;
    struct timeval tv;
    pid_t pid;

    /*
     * Close the tty
     */
    if (c->fd > 0)
        close (c->fd);
/*  if (c->dethrottle) deschedule(c->dethrottle); */
    if (c->zlb_xmit)
        deschedule (c->zlb_xmit);

#ifdef IP_ALLOCATION
    if (c->addr)
        unreserve_addr (c->addr);
#endif

    /*
     * Kill off pppd and wait for it to
     * return to us.  This should only be called
     * in rare cases if pppd hasn't already died
     * voluntarily
     */
    pid = c->pppd;
    if (pid)
    {
        /* Set c->pppd to zero to prevent recursion with child_handler */
        c->pppd = 0;
        kill (pid, SIGTERM);
        waitpid (pid, NULL, 0);
    }
    if (c->container)
    {
#ifdef USE_KERNEL
        if (kernel_support)
            ioctl (server_socket, L2TPIOCDELCALL,
                   (c->container->ourtid << 16) | (c->ourcid));
#endif
        p = c->container->call_head;
        /*
         * Remove us from the call list, although
         * we might not actually be there
         */
        if (p)
        {
            if (p == c)
            {
                c->container->call_head = c->next;
                c->container->count--;
            }
            else
            {
                while (p->next && (p->next != c))
                    p = p->next;
                if (p->next)
                {
                    p->next = c->next;
                    c->container->count--;
                }
            }
        }
    }
    if (c->lac)
    {
        c->lac->c = NULL;
        if (c->lac->redial && (c->lac->rtimeout > 0) && !c->lac->rsched
            && c->lac->active)
        {
#ifdef DEBUG_MAGIC
            l2tp_log (LOG_LOG, "%s: Will redial in %d seconds\n",
                      __FUNCTION__, c->lac->rtimeout);
#endif
            tv.tv_sec = c->lac->rtimeout;
            tv.tv_usec = 0;
            c->lac->rsched = schedule (tv, magic_lac_dial, c->lac);
        }
    }

    free (c);
}
void call_close (struct call *c)
{
    struct buffer *buf;
    struct schedule_entry *se, *ose;
    struct call *tmp, *tmp2;
    if (!c || !c->container)
    {
        l2tp_log (LOG_DEBUG, "%s: called on null call or containerless call\n",
                  __FUNCTION__);
        return;
    }
    if (c == c->container->self)
    {
        /*
         * We're actually closing the
         * entire tunnel
         */

        /* First deschedule any remaining packet transmissions
           for this tunnel.  That means Hello's and any remaining
           packets scheduled for transmission.  This is a very
           nasty little piece of code here. */

        se = events;
        ose = NULL;
        while (se)
        {
            if ((((struct buffer *) se->data)->tunnel == c->container)
                || ((struct tunnel *) se->data == c->container))
            {
#ifdef DEBUG_CLOSE
                l2tp_log (LOG_DEBUG, "%s: Descheduling event\n", __FUNCTION__);
#endif
                if (ose)
                {
                    ose->next = se->next;
                    if ((struct tunnel *) se->data != c->container)
                        toss ((struct buffer *) (se->data));
                    free (se);
                    se = ose->next;
                }
                else
                {
                    events = se->next;
                    if ((struct tunnel *) se->data != c->container)
                        toss ((struct buffer *) (se->data));
                    free (se);
                    se = events;
                }
            }
            else
            {
                ose = se;
                se = se->next;
            }
        }
        if (c->closing)
        {
            /* Really close this tunnel, as our StopCCN has been ack'd */
#ifdef DEBUG_CLOSE
            l2tp_log (LOG_DEBUG, "%s: Actually closing tunnel %d\n",
                      __FUNCTION__, c->container->ourtid);
#endif
#ifdef USE_KERNEL
            if (kernel_support)
                ioctl (server_socket, L2TPIOCDELTUNNEL, c->container->ourtid);
#endif
            destroy_tunnel (c->container);
            return;
        }

        /*
         * We need to close, but need to provide reliable delivery
         * of the final StopCCN. We record our state to know when
         * we have actually received an ACK on our StopCCN
         */
        c->closeSs = c->container->control_seq_num;
        buf = new_outgoing (c->container);
        add_message_type_avp (buf, StopCCN);
        if (c->container->hbit)
        {
            mk_challenge (c->container->chal_them.vector, VECTOR_SIZE);
            add_randvect_avp (buf, c->container->chal_them.vector,
                              VECTOR_SIZE);
        }
        add_tunnelid_avp (buf, c->container->ourtid);
        if (c->result < 0)
            c->result = RESULT_CLEAR;
        if (c->error < 0)
            c->error = 0;
        add_result_code_avp (buf, c->result, c->error, c->errormsg,
                             strlen (c->errormsg));
        add_control_hdr (c->container, c, buf);
        if (packet_dump)
            do_packet_dump (buf);
#ifdef DEBUG_CLOSE
        l2tp_log (LOG_DEBUG, "%s: enqueuing close message for tunnel\n",
                  __FUNCTION__);
#endif
        control_xmit (buf);

        /*
         * We also need to stop all traffic on any calls contained
         * within us.
         */
        tmp = c->container->call_head;
        while (tmp)
        {
            tmp2 = tmp->next;
            tmp->needclose = 0;
            tmp->closing = -1;
            call_close (tmp);
            tmp = tmp2;
        }

        /* mf, 16.04.2003: change log message to show tunneltag */
        // l2tp_log (LOG_LOG,
        //     "%s : Connection %d closed to %s, port %d (%s)\n", __FUNCTION__,
        //     c->container->tid,
        //     IPADDY (c->container->peer.sin_addr),
        //     ntohs (c->container->peer.sin_port), c->errormsg);
        l2tp_log (LOG_LOG,
                  "%s : Connection closed with peer %s, reason: %s\n",
                  __FUNCTION__, c->container->tunneltag, c->errormsg);
    }
    else
    {
        /*
         * Just close a call
         */
#ifdef USE_KERNEL
        struct l2tp_call_opts co;
#endif
        if (c->zlb_xmit)
            deschedule (c->zlb_xmit);
/*      if (c->dethrottle) deschedule(c->dethrottle); */
        if (c->closing)
        {
#ifdef DEBUG_CLOSE
            l2tp_log (LOG_DEBUG, "%s: Actually closing call %d\n",
                      __FUNCTION__, c->ourcid);
#endif
            destroy_call (c);
            return;
        }
#ifdef USE_KERNEL
        if (kernel_support)
        {
            co.ourtid = c->container->ourtid;
            co.ourcid = c->ourcid;
            ioctl (server_socket, L2TPIOCGETCALLOPTS, &co);
            co.flags = co.flags & ~L2TP_FLAG_CALL_UP;
            ioctl (server_socket, L2TPIOCSETCALLOPTS, &co);
        }
#endif
        c->closeSs = c->container->control_seq_num;
        buf = new_outgoing (c->container);
        add_message_type_avp (buf, CDN);
        if (c->container->hbit)
        {
            mk_challenge (c->container->chal_them.vector, VECTOR_SIZE);
            add_randvect_avp (buf, c->container->chal_them.vector,
                              VECTOR_SIZE);
        }
        if (c->result < 0)
            c->result = RESULT_CLEAR;
        if (c->error < 0)
            c->error = 0;
        add_result_code_avp (buf, c->result, c->error, c->errormsg,
                             strlen (c->errormsg));
#ifdef TEST_HIDDEN
        add_callid_avp (buf, c->ourcid, c->container);
#else
        add_callid_avp (buf, c->ourcid);
#endif
        add_control_hdr (c->container, c, buf);
        if (packet_dump)
            do_packet_dump (buf);
#ifdef DEBUG_CLOSE
        l2tp_log (LOG_DEBUG, "%s: enqueuing close message for call %d\n",
                  __FUNCTION__, c->ourcid);
#endif
        control_xmit (buf);
        l2tp_log (LOG_LOG, "%s: Call %d to %s disconnected\n", __FUNCTION__,
                  c->ourcid, IPADDY (c->container->peer.sin_addr));
    }
    /*
     * Note that we're in the process of closing now
     */
    c->closing = -1;
}
/****f* spin1_api.c/spin1_flush_rx_packet_queue
*
* SUMMARY
*  This function effectively discards all received packets which are yet
*  to be processed by calling deschedule(MC_PACKET_RECEIVED).
*
* SYNOPSIS
*  void spin1_flush_rx_packet_queue()
*
* SOURCE
*/
void spin1_flush_rx_packet_queue()
{
    deschedule(MC_PACKET_RECEIVED);
    deschedule(MCPL_PACKET_RECEIVED);
}
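/*
 * Illustrative sketch (not from the original source): one place a flush is
 * typically useful is a timer-tick callback that detects the application
 * has fallen behind and drops the backlog of queued multicast packets
 * rather than processing stale data. The 'queued_packets' counter, the
 * threshold, and the registration via spin1_callback_on are assumptions
 * for the example.
 */
#define BACKLOG_LIMIT 100       /* hypothetical threshold */

extern uint queued_packets;     /* hypothetical counter kept by the packet handler */

void timer_tick (uint ticks, uint unused)
{
    if (queued_packets > BACKLOG_LIMIT)
        spin1_flush_rx_packet_queue();
}

void setup (void)
{
    /* Assumed registration call; spin1 callbacks take (uint, uint). */
    spin1_callback_on(TIMER_TICK, timer_tick, 1);
}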
void destroy_call (struct call *c)
{
    /*
     * Here, we unconditionally destroy a call.
     */
    struct call *p;
    struct timeval tv;
    pid_t pid;

    /*
     * Close the tty
     */
    if (c->fd > 0)
        close (c->fd);
/*  if (c->dethrottle) deschedule(c->dethrottle); */
    if (c->zlb_xmit)
        deschedule (c->zlb_xmit);

#ifdef IP_ALLOCATION
    if (c->addr)
        unreserve_addr (c->addr);
#endif

    /*
     * Kill off pppd and wait for it to
     * return to us.  This should only be called
     * in rare cases if pppd hasn't already died
     * voluntarily
     */
    pid = c->pppd;
    if (pid)
    {
        /* Set c->pppd to zero to prevent recursion with child_handler */
        c->pppd = 0;
        /*
         * There is a bug in some pppd versions where sending a SIGTERM
         * does not actually seem to kill pppd, and xl2tpd waits indefinitely
         * using waitpid, not accepting any new connections either. Therefore
         * we now use some more force and send it a SIGKILL instead of SIGTERM.
         * One confirmed buggy version of pppd is ppp-2.4.2-6.4.RHEL4
         * See http://bugs.xelerance.com/view.php?id=739
         *
         * Sometimes pppd takes 7 sec to go down! We don't have that much time,
         * since all other calls are suspended while doing this.
         */
#ifdef TRUST_PPPD_TO_DIE
#ifdef DEBUG_PPPD
        l2tp_log (LOG_DEBUG, "Terminating pppd: sending TERM signal to pid %d\n", pid);
#endif
        kill (pid, SIGTERM);
#else
#ifdef DEBUG_PPPD
        l2tp_log (LOG_DEBUG, "Terminating pppd: sending KILL signal to pid %d\n", pid);
#endif
        kill (pid, SIGKILL);
#endif
    }
    if (c->container)
    {
        p = c->container->call_head;
        /*
         * Remove us from the call list, although
         * we might not actually be there
         */
        if (p)
        {
            if (p == c)
            {
                c->container->call_head = c->next;
                c->container->count--;
            }
            else
            {
                while (p->next && (p->next != c))
                    p = p->next;
                if (p->next)
                {
                    p->next = c->next;
                    c->container->count--;
                }
            }
        }
    }
    if (c->lac)
    {
        c->lac->c = NULL;
        if (c->lac->redial && (c->lac->rtimeout > 0) && !c->lac->rsched
            && c->lac->active)
        {
#ifdef DEBUG_MAGIC
            l2tp_log (LOG_DEBUG, "Will redial in %d seconds\n",
                      c->lac->rtimeout);
#endif
            tv.tv_sec = c->lac->rtimeout;
            tv.tv_usec = 0;
            c->lac->rsched = schedule (tv, magic_lac_dial, c->lac);
        }
    }

    free (c);
}
void call_close (struct call *c)
{
    struct buffer *buf;
    struct schedule_entry *se, *ose;
    struct call *tmp, *tmp2;
    if (!c || !c->container)
    {
        l2tp_log (LOG_DEBUG, "%s: called on null call or containerless call\n",
                  __FUNCTION__);
        return;
    }
    if (c == c->container->self)
    {
        /*
         * We're actually closing the
         * entire tunnel
         */

        /* First deschedule any remaining packet transmissions
           for this tunnel.  That means Hello's and any remaining
           packets scheduled for transmission.  This is a very
           nasty little piece of code here. */

        se = events;
        ose = NULL;
        while (se)
        {
            if ((((struct buffer *) se->data)->tunnel == c->container)
                || ((struct tunnel *) se->data == c->container))
            {
#ifdef DEBUG_CLOSE
                l2tp_log (LOG_DEBUG, "%s: Descheduling event\n", __FUNCTION__);
#endif
                if (ose)
                {
                    ose->next = se->next;
                    if ((struct tunnel *) se->data != c->container)
                        toss ((struct buffer *) (se->data));
                    free (se);
                    se = ose->next;
                }
                else
                {
                    events = se->next;
                    if ((struct tunnel *) se->data != c->container)
                        toss ((struct buffer *) (se->data));
                    free (se);
                    se = events;
                }
            }
            else
            {
                ose = se;
                se = se->next;
            }
        }
        if (c->closing)
        {
            /* Really close this tunnel, as our StopCCN has been ack'd */
#ifdef DEBUG_CLOSE
            l2tp_log (LOG_DEBUG, "%s: Actually closing tunnel %d\n",
                      __FUNCTION__, c->container->ourtid);
#endif
            destroy_tunnel (c->container);
            return;
        }

        /*
         * We need to close, but need to provide reliable delivery
         * of the final StopCCN. We record our state to know when
         * we have actually received an ACK on our StopCCN
         */
        c->closeSs = c->container->control_seq_num;
        buf = new_outgoing (c->container);
        add_message_type_avp (buf, StopCCN);
        if (c->container->hbit)
        {
            mk_challenge (c->container->chal_them.vector, VECTOR_SIZE);
            add_randvect_avp (buf, c->container->chal_them.vector,
                              VECTOR_SIZE);
        }
        add_tunnelid_avp (buf, c->container->ourtid);
        if (c->result < 0)
            c->result = RESULT_CLEAR;
        if (c->error < 0)
            c->error = 0;
        add_result_code_avp (buf, c->result, c->error, c->errormsg,
                             strlen (c->errormsg));
        add_control_hdr (c->container, c, buf);
        if (gconfig.packet_dump)
            do_packet_dump (buf);
#ifdef DEBUG_CLOSE
        l2tp_log (LOG_DEBUG, "%s: enqueuing close message for tunnel\n",
                  __FUNCTION__);
#endif
        control_xmit (buf);

        /*
         * We also need to stop all traffic on any calls contained
         * within us.
         */
        tmp = c->container->call_head;
        while (tmp)
        {
            tmp2 = tmp->next;
            tmp->needclose = 0;
            tmp->closing = -1;
            call_close (tmp);
            tmp = tmp2;
        }

        l2tp_log (LOG_DEBUG,
                  "Connection %d closed to %s, port %d (%s)\n",
                  c->container->tid,
                  IPADDY (c->container->peer.sin_addr),
                  ntohs (c->container->peer.sin_port), c->errormsg);
        if (strcmp (c->errormsg, "Server closing"))
        {
            if (!strcmp (c->errormsg, "goodbye!"))
                l2tp_log (LOG_INFO,
                          "Terminated by router connect %s, cause manual disconnect.\n",
                          IPADDY (c->container->peer.sin_addr));
            else if (c->msgtype <= 0 || c->msgtype > 16)
                l2tp_log (LOG_INFO, "Detect %s from %s, port %d\n",
                          c->errormsg,
                          IPADDY (c->container->peer.sin_addr),
                          ntohs (c->container->peer.sin_port));
            else
                l2tp_log (LOG_INFO, "Detect %s %s from %s, port %d\n",
                          msgtypes[c->msgtype], c->errormsg,
                          IPADDY (c->container->peer.sin_addr),
                          ntohs (c->container->peer.sin_port));
        }
        //if( (!strcmp(c->errormsg, "Timeout")) && (c->container->tid != 0) )
        //    l2tp_log(LOG_INFO, "Terminated by router, cause no response to echo-requests.");
    }
    else
    {
        /*
         * Just close a call
         */
        if (c->zlb_xmit)
            deschedule (c->zlb_xmit);
/*      if (c->dethrottle) deschedule(c->dethrottle); */
        if (c->closing)
        {
#ifdef DEBUG_CLOSE
            l2tp_log (LOG_DEBUG, "%s: Actually closing call %d\n",
                      __FUNCTION__, c->ourcid);
#endif
            destroy_call (c);
            return;
        }
        c->closeSs = c->container->control_seq_num;
        buf = new_outgoing (c->container);
        add_message_type_avp (buf, CDN);
        if (c->container->hbit)
        {
            mk_challenge (c->container->chal_them.vector, VECTOR_SIZE);
            add_randvect_avp (buf, c->container->chal_them.vector,
                              VECTOR_SIZE);
        }
        if (c->result < 0)
            c->result = RESULT_CLEAR;
        if (c->error < 0)
            c->error = 0;
        add_result_code_avp (buf, c->result, c->error, c->errormsg,
                             strlen (c->errormsg));
#ifdef TEST_HIDDEN
        add_callid_avp (buf, c->ourcid, c->container);
#else
        add_callid_avp (buf, c->ourcid);
#endif
        add_control_hdr (c->container, c, buf);
        if (gconfig.packet_dump)
            do_packet_dump (buf);
#ifdef DEBUG_CLOSE
        l2tp_log (LOG_DEBUG, "%s: enqueuing close message for call %d\n",
                  __FUNCTION__, c->ourcid);
#endif
        control_xmit (buf);
        l2tp_log (LOG_DEBUG, "%s: Call %d to %s disconnected\n", __FUNCTION__,
                  c->ourcid, IPADDY (c->container->peer.sin_addr));
    }
    /*
     * Note that we're in the process of closing now
     */
    c->closing = -1;
}
void network_thread ()
{
    /*
     * We loop forever waiting on either data from the ppp drivers or from
     * our network socket.  Control handling is no longer done here.
     */
    int fromlen;                /* Length of the address */
    int tunnel, call;           /* Tunnel and call */
    int recvsize;               /* Length of data received */
    struct buffer *buf;         /* Payload buffer */
    struct call *c, *sc;        /* Call to send this off to */
    struct tunnel *st;          /* Tunnel */
    fd_set readfds;             /* Descriptors to watch for reading */
    int max;                    /* Highest fd */
    struct timeval tv;          /* Timeout for select */

    /* This one buffer can be recycled for everything except control packets */
    buf = new_buf (MAX_RECV_SIZE);

    gconfig.debug_tunnel = 1;
    for (;;)
    {
        max = build_fdset (&readfds);
        tv.tv_sec = 1;
        tv.tv_usec = 0;
        schedule_unlock ();
        select (max + 1, &readfds, NULL, NULL, NULL);
        schedule_lock ();
        if (FD_ISSET (control_fd, &readfds))
        {
            do_control ();
        }
        if (FD_ISSET (server_socket, &readfds))
        {
            /*
             * Okay, now we're ready for reading and processing new data.
             */
            recycle_buf (buf);

            /* Reserve space for expanding payload packet headers */
            buf->start += PAYLOAD_BUF;
            buf->len -= PAYLOAD_BUF;

            fromlen = sizeof (from);
            recvsize = recvfrom (server_socket, buf->start, buf->len, 0,
                                 (struct sockaddr *) &from, &fromlen);
            if (recvsize < MIN_PAYLOAD_HDR_LEN)
            {
                if (recvsize < 0)
                {
                    if (errno != EAGAIN)
                        log (LOG_WARN, "%s: recvfrom returned error %d (%s)\n",
                             __FUNCTION__, errno, strerror (errno));
                }
                else
                {
                    log (LOG_WARN, "%s: received too small a packet\n",
                         __FUNCTION__);
                }
            }
            else
            {
                buf->len = recvsize;
                if (gconfig.debug_network)
                {
                    log (LOG_DEBUG, "%s: recv packet from %s, size = %d, "
                         "tunnel = %d, call = %d\n", __FUNCTION__,
                         inet_ntoa (from.sin_addr), recvsize, tunnel, call);
                }
                if (gconfig.packet_dump)
                {
                    do_packet_dump (buf);
                }
                fix_hdr (buf->start);
                extract (buf->start, &tunnel, &call);
                if (!(c = get_call (tunnel, call, from.sin_addr.s_addr,
                                    from.sin_port)))
                {
                    log (LOG_DEBUG, "%s(%d)\n", __FUNCTION__, __LINE__);
                    if ((c = get_tunnel (tunnel, from.sin_addr.s_addr,
                                         from.sin_port)))
                    {
                        /*
                         * It is theoretically possible that we could be sent
                         * a control message (say a StopCCN) on a call that we
                         * have already closed or some such nonsense.  To prevent
                         * this from closing the tunnel, if we get a call on a valid
                         * tunnel, but not with a valid CID, we'll just send a ZLB
                         * to ack receiving the packet.
                         */
                        if (gconfig.debug_tunnel)
                            log (LOG_DEBUG,
                                 "%s: no such call %d on tunnel %d.  Sending special ZLB\n",
                                 __FUNCTION__, call, tunnel);
                        handle_special (buf, c, call);
                    }
                    else
                        log (LOG_DEBUG,
                             "%s: unable to find call or tunnel to handle packet.  call = %d, tunnel = %d Dumping.\n",
                             __FUNCTION__, call, tunnel);
                }
                else
                {
                    buf->peer = from;
                    /* Handle the packet */
                    c->container->chal_us.vector = NULL;
                    if (handle_packet (buf, c->container, c))
                    {
                        if (gconfig.debug_tunnel)
                            log (LOG_DEBUG, "%s(%d): bad packet\n",
                                 __FUNCTION__, __LINE__);
                    }
                    if (c->cnu)
                    {
                        /* Send Zero Byte Packet */
                        control_zlb (buf, c->container, c);
                        c->cnu = 0;
                    }
                }
            }
        }
        st = tunnels.head;
        while (st)
        {
            sc = st->call_head;
            while (sc)
            {
                if ((sc->fd >= 0) && FD_ISSET (sc->fd, &readfds))
                {
                    /* Got some payload to send */
                    int result;
                    recycle_payload (buf, sc->container->peer);
#ifdef DEBUG_FLOW_MORE
                    log (LOG_DEBUG, "%s: rws = %d, pSs = %d, pLr = %d\n",
                         __FUNCTION__, sc->rws, sc->pSs, sc->pLr);
#endif
/*                  if ((sc->rws>0) && (sc->pSs > sc->pLr + sc->rws) && !sc->rbit)
                    {
#ifdef DEBUG_FLOW
                        log(LOG_DEBUG, "%s: throttling payload (call = %d, tunnel = %d, Lr = %d, Ss = %d, rws = %d)!\n",
                            __FUNCTION__, sc->cid, sc->container->tid, sc->pLr, sc->pSs, sc->rws);
#endif
                        sc->throttle = -1;
                        We unthrottle in handle_packet if we get a payload packet,
                        valid or ZLB, but we also schedule a dethrottle in which
                        case the R-bit will be set
                        FIXME: Rate Adaptive timeout?
                        tv.tv_sec = 2;
                        tv.tv_usec = 0;
                        sc->dethrottle = schedule(tv, dethrottle, sc);
                    }
                    else */
/*                  while ((result=read_packet(buf,sc->fd,sc->frame & SYNC_FRAMING))>0) { */
                    while ((result = read_packet (buf, sc->fd, SYNC_FRAMING)) > 0)
                    {
                        add_payload_hdr (sc->container, sc, buf);
                        if (gconfig.packet_dump)
                        {
                            do_packet_dump (buf);
                        }
                        sc->prx = sc->data_rec_seq_num;
                        if (sc->zlb_xmit)
                        {
                            deschedule (sc->zlb_xmit);
                            sc->zlb_xmit = NULL;
                        }
                        sc->tx_bytes += buf->len;
                        sc->tx_pkts++;
                        udp_xmit (buf);
                        recycle_payload (buf, sc->container->peer);
                    }
                    if (result != 0)
                    {
                        log (LOG_WARN,
                             "%s: tossing read packet, error = %s (%d).  Closing call.\n",
                             __FUNCTION__, strerror (-result), -result);
                        strcpy (sc->errormsg, strerror (-result));
                        sc->needclose = -1;
                    }
                }
                sc = sc->next;
            }
            st = st->next;
        }
    }
}
void network_thread ()
{
    /*
     * We loop forever waiting on either data from the ppp drivers or from
     * our network socket.  Control handling is no longer done here.
     */
    struct sockaddr_in from, to;
    unsigned int fromlen, tolen;
    int tunnel, call;           /* Tunnel and call */
    int recvsize;               /* Length of data received */
    struct buffer *buf;         /* Payload buffer */
    struct call *c, *sc;        /* Call to send this off to */
    struct tunnel *st;          /* Tunnel */
    fd_set readfds;             /* Descriptors to watch for reading */
    int max;                    /* Highest fd */
    struct timeval tv, *ptv;    /* Timeout for select */
    struct msghdr msgh;
    struct iovec iov;
    char cbuf[256];
    unsigned int refme, refhim;

    /* This one buffer can be recycled for everything except control packets */
    buf = new_buf (MAX_RECV_SIZE);

    tunnel = 0;
    call = 0;

    for (;;)
    {
        int ret;
        process_signal();
        max = build_fdset (&readfds);
        ptv = process_schedule(&tv);
        ret = select (max + 1, &readfds, NULL, NULL, ptv);
        if (ret <= 0)
        {
            if (ret == 0)
            {
                if (gconfig.debug_network)
                    l2tp_log (LOG_DEBUG, "%s: select timeout\n", __FUNCTION__);
            }
            else
            {
                if (gconfig.debug_network)
                    l2tp_log (LOG_DEBUG, "%s: select returned error %d (%s)\n",
                              __FUNCTION__, errno, strerror (errno));
            }
            continue;
        }
        if (FD_ISSET (control_fd, &readfds))
        {
            do_control ();
        }
        if (FD_ISSET (server_socket, &readfds))
        {
            /*
             * Okay, now we're ready for reading and processing new data.
             */
            recycle_buf (buf);

            /* Reserve space for expanding payload packet headers */
            buf->start += PAYLOAD_BUF;
            buf->len -= PAYLOAD_BUF;

            memset(&from, 0, sizeof(from));
            memset(&to, 0, sizeof(to));
            fromlen = sizeof(from);
            tolen = sizeof(to);

            memset(&msgh, 0, sizeof(struct msghdr));
            iov.iov_base = buf->start;
            iov.iov_len = buf->len;
            msgh.msg_control = cbuf;
            msgh.msg_controllen = sizeof(cbuf);
            msgh.msg_name = &from;
            msgh.msg_namelen = fromlen;
            msgh.msg_iov = &iov;
            msgh.msg_iovlen = 1;
            msgh.msg_flags = 0;

            /* Receive one packet. */
            recvsize = recvmsg(server_socket, &msgh, 0);

            if (recvsize < MIN_PAYLOAD_HDR_LEN)
            {
                if (recvsize < 0)
                {
                    if (errno != EAGAIN)
                        l2tp_log (LOG_WARNING,
                                  "%s: recvfrom returned error %d (%s)\n",
                                  __FUNCTION__, errno, strerror (errno));
                }
                else
                {
                    l2tp_log (LOG_WARNING, "%s: received too small a packet\n",
                              __FUNCTION__);
                }
                continue;
            }

            refme = refhim = 0;

            /* extract IPsec info out */
            if (gconfig.ipsecsaref)
            {
                struct cmsghdr *cmsg;
                /* Process auxiliary received data in msgh */
                for (cmsg = CMSG_FIRSTHDR(&msgh);
                     cmsg != NULL;
                     cmsg = CMSG_NXTHDR(&msgh, cmsg))
                {
                    if (cmsg->cmsg_level == IPPROTO_IP
                        && cmsg->cmsg_type == IP_IPSEC_REFINFO)
                    {
                        unsigned int *refp;
                        refp = (unsigned int *)CMSG_DATA(cmsg);
                        refme = refp[0];
                        refhim = refp[1];
                    }
                }
            }

            /*
             * some logic could be added here to verify that we only
             * get L2TP packets inside of IPsec, or to provide different
             * classes of service to packets not inside of IPsec.
             */
            buf->len = recvsize;
            fix_hdr (buf->start);
            extract (buf->start, &tunnel, &call);
            if (gconfig.debug_network)
            {
                l2tp_log (LOG_DEBUG,
                          "%s: recv packet from %s, size = %d, "
                          "tunnel = %d, call = %d ref=%u refhim=%u\n",
                          __FUNCTION__, inet_ntoa (from.sin_addr),
                          recvsize, tunnel, call, refme, refhim);
            }
            if (gconfig.packet_dump)
            {
                do_packet_dump (buf);
            }
            if (!(c = get_call (tunnel, call, from.sin_addr.s_addr,
                                from.sin_port, refme, refhim)))
            {
                if ((c = get_tunnel (tunnel, from.sin_addr.s_addr, from.sin_port)))
                {
                    /*
                     * It is theoretically possible that we could be sent
                     * a control message (say a StopCCN) on a call that we
                     * have already closed or some such nonsense.  To
                     * prevent this from closing the tunnel, if we get a
                     * call on a valid tunnel, but not with a valid CID,
                     * we'll just send a ZLB to ack receiving the packet.
                     */
                    if (gconfig.debug_tunnel)
                        l2tp_log (LOG_DEBUG,
                                  "%s: no such call %d on tunnel %d.  Sending special ZLB\n",
                                  __FUNCTION__, call, tunnel);
                    handle_special (buf, c, call);
                    /* get a new buffer */
                    buf = new_buf (MAX_RECV_SIZE);
                }
                else
                    l2tp_log (LOG_DEBUG,
                              "%s: unable to find call or tunnel to handle packet.  call = %d, tunnel = %d Dumping.\n",
                              __FUNCTION__, call, tunnel);
            }
            else
            {
                buf->peer = from;
                /* Handle the packet */
                c->container->chal_us.vector = NULL;
                if (handle_packet (buf, c->container, c))
                {
                    if (gconfig.debug_tunnel)
                        l2tp_log (LOG_DEBUG, "%s: bad packet\n", __FUNCTION__);
                }
                if (c->cnu)
                {
                    /* Send Zero Byte Packet */
                    control_zlb (buf, c->container, c);
                    c->cnu = 0;
                }
            }
        }

        /*
         * finished obvious sources, look for data from PPP connections.
         */
        st = tunnels.head;
        while (st)
        {
            sc = st->call_head;
            while (sc)
            {
                if ((sc->fd >= 0) && FD_ISSET (sc->fd, &readfds))
                {
                    /* Got some payload to send */
                    int result;
                    recycle_payload (buf, sc->container->peer);
/*
#ifdef DEBUG_FLOW_MORE
                    l2tp_log (LOG_DEBUG, "%s: rws = %d, pSs = %d, pLr = %d\n",
                              __FUNCTION__, sc->rws, sc->pSs, sc->pLr);
#endif
                    if ((sc->rws>0) && (sc->pSs > sc->pLr + sc->rws) && !sc->rbit)
                    {
#ifdef DEBUG_FLOW
                        log(LOG_DEBUG, "%s: throttling payload (call = %d, tunnel = %d, Lr = %d, Ss = %d, rws = %d)!\n",
                            __FUNCTION__, sc->cid, sc->container->tid, sc->pLr, sc->pSs, sc->rws);
#endif
                        sc->throttle = -1;
                        We unthrottle in handle_packet if we get a payload packet,
                        valid or ZLB, but we also schedule a dethrottle in which
                        case the R-bit will be set
                        FIXME: Rate Adaptive timeout?
                        tv.tv_sec = 2;
                        tv.tv_usec = 0;
                        sc->dethrottle = schedule(tv, dethrottle, sc);
                    }
                    else
*/
/*                  while ((result=read_packet(buf,sc->fd,sc->frame & SYNC_FRAMING))>0) { */
                    while ((result = read_packet (buf, sc->fd, SYNC_FRAMING)) > 0)
                    {
                        add_payload_hdr (sc->container, sc, buf);
                        if (gconfig.packet_dump)
                        {
                            do_packet_dump (buf);
                        }
                        sc->prx = sc->data_rec_seq_num;
                        if (sc->zlb_xmit)
                        {
                            deschedule (sc->zlb_xmit);
                            sc->zlb_xmit = NULL;
                        }
                        sc->tx_bytes += buf->len;
                        sc->tx_pkts++;
                        udp_xmit (buf, st);
                        recycle_payload (buf, sc->container->peer);
                    }
                    if (result != 0)
                    {
                        l2tp_log (LOG_WARNING,
                                  "%s: tossing read packet, error = %s (%d).  Closing call.\n",
                                  __FUNCTION__, strerror (-result), -result);
                        strcpy (sc->errormsg, strerror (-result));
                        sc->needclose = -1;
                    }
                }
                sc = sc->next;
            }
            st = st->next;
        }
    }
}
ISC_TIMERFUNC_SCOPE isc_result_t
isc__timer_reset(isc_timer_t *timer0, isc_timertype_t type,
                 isc_time_t *expires, isc_interval_t *interval,
                 isc_boolean_t purge)
{
    isc__timer_t *timer = (isc__timer_t *)timer0;
    isc_time_t now;
    isc__timermgr_t *manager;
    isc_result_t result;

    /*
     * Change the timer's type, expires, and interval values to the given
     * values.  If 'purge' is ISC_TRUE, any pending events from this timer
     * are purged from its task's event queue.
     */

    REQUIRE(VALID_TIMER(timer));
    manager = timer->manager;
    REQUIRE(VALID_MANAGER(manager));

    if (expires == NULL)
        expires = isc_time_epoch;
    if (interval == NULL)
        interval = isc_interval_zero;
    REQUIRE(type == isc_timertype_inactive ||
            !(isc_time_isepoch(expires) && isc_interval_iszero(interval)));
    REQUIRE(type != isc_timertype_limited ||
            !(isc_time_isepoch(expires) || isc_interval_iszero(interval)));

    /*
     * Get current time.
     */
    if (type != isc_timertype_inactive) {
        TIME_NOW(&now);
    } else {
        /*
         * We don't have to do this, but it keeps the compiler from
         * complaining about "now" possibly being used without being
         * set, even though it will never actually happen.
         */
        isc_time_settoepoch(&now);
    }

    LOCK(&manager->lock);
    LOCK(&timer->lock);

    if (purge)
        (void)isc_task_purgerange(timer->task,
                                  timer,
                                  ISC_TIMEREVENT_FIRSTEVENT,
                                  ISC_TIMEREVENT_LASTEVENT,
                                  NULL);
    timer->type = type;
    timer->expires = *expires;
    timer->interval = *interval;
    if (type == isc_timertype_once && !isc_interval_iszero(interval)) {
        result = isc_time_add(&now, interval, &timer->idle);
    } else {
        isc_time_settoepoch(&timer->idle);
        result = ISC_R_SUCCESS;
    }

    if (result == ISC_R_SUCCESS) {
        if (type == isc_timertype_inactive) {
            deschedule(timer);
            result = ISC_R_SUCCESS;
        } else
            result = schedule(timer, &now, ISC_TRUE);
    }

    UNLOCK(&timer->lock);
    UNLOCK(&manager->lock);

    return (result);
}
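/*
 * Illustrative sketch (not from the original source): driving the code above
 * through the public isc_timer_reset() wrapper. Resetting to
 * isc_timertype_inactive takes the deschedule() path; re-arming as a ticker
 * takes the schedule() path. 'mytimer' is assumed to have been created
 * earlier with isc_timer_create().
 */
static void
rearm_timer(isc_timer_t *mytimer) {
    isc_interval_t interval;
    isc_result_t result;

    /* Stop the timer, purging any timer events already queued on its task. */
    result = isc_timer_reset(mytimer, isc_timertype_inactive,
                             NULL, NULL, ISC_TRUE);
    RUNTIME_CHECK(result == ISC_R_SUCCESS);

    /* Re-arm it to fire every 5 seconds. */
    isc_interval_set(&interval, 5, 0);
    result = isc_timer_reset(mytimer, isc_timertype_ticker,
                             NULL, &interval, ISC_FALSE);
    RUNTIME_CHECK(result == ISC_R_SUCCESS);
}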
void network_thread ()
{
    /*
     * We loop forever waiting on either data from the ppp drivers or from
     * our network socket.  Control handling is no longer done here.
     */
    int fromlen;                /* Length of the address */
    int tunnel, call;           /* Tunnel and call */
    int recvsize;               /* Length of data received */
    struct buffer *buf;         /* Payload buffer */
    struct call *c, *sc;        /* Call to send this off to */
    struct tunnel *st;          /* Tunnel */
    fd_set readfds;             /* Descriptors to watch for reading */
    int max;                    /* Highest fd */
    struct timeval tv;          /* Timeout for select */

    /* This one buffer can be recycled for everything except control packets */
    buf = new_buf (MAX_RECV_SIZE);
    for (;;)
    {
        /*
         * First, let's send out any outgoing packets that are waiting on us.
         * xmit_udp should only contain control packets in the unthreaded version!
         */
        max = 0;
        FD_ZERO (&readfds);
        st = tunnels.head;
        while (st)
        {
            if (st->self->needclose ^ st->self->closing)
            {
                if (debug_tunnel)
                    log (LOG_DEBUG, "%s: closing down tunnel %d\n",
                         __FUNCTION__, st->ourtid);
                call_close (st->self);
                /* Reset the while loop and check for NULL */
                st = tunnels.head;
                if (!st)
                    break;
                continue;
            }
            sc = st->call_head;
            while (sc)
            {
                if (sc->needclose ^ sc->closing)
                {
                    call_close (sc);
                    sc = st->call_head;
                    if (!sc)
                        break;
                    continue;
                }
                if (sc->fd > -1)
                {
/*                  if (!sc->throttle && !sc->needclose && !sc->closing) { */
                    if (!sc->needclose && !sc->closing)
                    {
                        if (sc->fd > max)
                            max = sc->fd;
                        FD_SET (sc->fd, &readfds);
                    }
                }
                sc = sc->next;
            }
            st = st->next;
        }
        FD_SET (server_socket, &readfds);
        if (server_socket > max)
            max = server_socket;
        FD_SET (control_fd, &readfds);
        if (control_fd > max)
            max = control_fd;
        tv.tv_sec = 1;
        tv.tv_usec = 0;

        /*add start, by MJ.*/
        extern int is_first_run;
        if (is_first_run)
        {
            FILE *lac_fp;       /* to get conn_id which was written by acos */
            char cmd[64] = {0};
            char conn_id[64] = "c default";

            lac_fp = fopen("/tmp/l2tp/l2tpd.info", "r");
            if (lac_fp != NULL)
            {
                //fscanf(lac_fp, "%s", conn_id);
                fgets(conn_id, sizeof(conn_id), lac_fp);
                fclose(lac_fp);
            }
            else
                log (LOG_DEBUG, "open /tmp/l2tp/l2tpd.info failed\n");

            log (LOG_DEBUG, "%s: -> the first run.\n", __FUNCTION__);
            sprintf(cmd, "c %s", conn_id);
            //do_control("c MJ.");
            do_control(cmd);
            //write(control_fd, cmd, strlen(cmd) );
            is_first_run = 0;
        }
        /*add end. by MJ.*/

        schedule_unlock ();
        select (max + 1, &readfds, NULL, NULL, NULL);
        schedule_lock ();
        if (FD_ISSET (control_fd, &readfds))
        {
            do_control (NULL);
        }
        if (FD_ISSET (server_socket, &readfds))
        {
            /* wklin added start, 04/12/2011 */
            extern void connect_pppunit(void);
            connect_pppunit();
            /* wklin added end, 04/12/2011 */
            /*
             * Okay, now we're ready for reading and processing new data.
             */
            recycle_buf (buf);

            /* Reserve space for expanding payload packet headers */
            buf->start += PAYLOAD_BUF;
            buf->len -= PAYLOAD_BUF;

            fromlen = sizeof (from);
            recvsize = recvfrom (server_socket, buf->start, buf->len, 0,
                                 (struct sockaddr *) &from, &fromlen);
            /* , by MJ. for debugging. */
            //log (LOG_DEBUG, "receive %d bytes from server_socket.\n", recvsize);

            if (recvsize < MIN_PAYLOAD_HDR_LEN)
            {
                if (recvsize < 0)
                {
                    if (errno != EAGAIN)
                        log (LOG_WARN, "%s: recvfrom returned error %d (%s)\n",
                             __FUNCTION__, errno, strerror (errno));
                }
                else
                {
                    log (LOG_WARN, "%s: received too small a packet\n",
                         __FUNCTION__);
                }
            }
            else
            {
                buf->len = recvsize;
                fix_hdr (buf->start);
                extract (buf->start, &tunnel, &call);
                if (debug_network)
                {
                    log (LOG_DEBUG, "%s: recv packet from %s, size = %d, "
                         "tunnel = %d, call = %d\n", __FUNCTION__,
                         inet_ntoa (from.sin_addr), recvsize, tunnel, call);
                }
                if (packet_dump)
                {
                    do_packet_dump (buf);
                }
                if (!(c = get_call (tunnel, call, from.sin_addr.s_addr,
                                    from.sin_port)))
                {
                    if ((c = get_tunnel (tunnel, from.sin_addr.s_addr,
                                         from.sin_port)))
                    {
                        /*
                         * It is theoretically possible that we could be sent
                         * a control message (say a StopCCN) on a call that we
                         * have already closed or some such nonsense.  To prevent
                         * this from closing the tunnel, if we get a call on a valid
                         * tunnel, but not with a valid CID, we'll just send a ZLB
                         * to ack receiving the packet.
                         */
                        if (debug_tunnel)
                            log (LOG_DEBUG,
                                 "%s: no such call %d on tunnel %d.  Sending special ZLB\n",
                                 __FUNCTION__, call, tunnel);
                        handle_special (buf, c, call);
                    }
                    else
                        log (LOG_DEBUG,
                             "%s: unable to find call or tunnel to handle packet.  call = %d, tunnel = %d Dumping.\n",
                             __FUNCTION__, call, tunnel);
                }
                else
                {
                    buf->peer = from;
                    /* Handle the packet */
                    c->container->chal_us.vector = NULL;
                    if (handle_packet (buf, c->container, c))
                    {
                        if (debug_tunnel)
                            log (LOG_DEBUG, "%s: bad packet\n", __FUNCTION__);
                    }
                    if (c->cnu)
                    {
                        /* Send Zero Byte Packet */
                        control_zlb (buf, c->container, c);
                        c->cnu = 0;
                    }
                }
            }
        }
        st = tunnels.head;
        while (st)
        {
            sc = st->call_head;
            while (sc)
            {
                if ((sc->fd >= 0) && FD_ISSET (sc->fd, &readfds))
                {
                    /* Got some payload to send */
                    int result;
                    recycle_payload (buf, sc->container->peer);
#ifdef DEBUG_FLOW_MORE
                    log (LOG_DEBUG, "%s: rws = %d, pSs = %d, pLr = %d\n",
                         __FUNCTION__, sc->rws, sc->pSs, sc->pLr);
#endif
/*                  if ((sc->rws>0) && (sc->pSs > sc->pLr + sc->rws) && !sc->rbit)
                    {
#ifdef DEBUG_FLOW
                        log(LOG_DEBUG, "%s: throttling payload (call = %d, tunnel = %d, Lr = %d, Ss = %d, rws = %d)!\n",
                            __FUNCTION__, sc->cid, sc->container->tid, sc->pLr, sc->pSs, sc->rws);
#endif
                        sc->throttle = -1;
                        We unthrottle in handle_packet if we get a payload packet,
                        valid or ZLB, but we also schedule a dethrottle in which
                        case the R-bit will be set
                        FIXME: Rate Adaptive timeout?
                        tv.tv_sec = 2;
                        tv.tv_usec = 0;
                        sc->dethrottle = schedule(tv, dethrottle, sc);
                    }
                    else */
/*                  while ((result=read_packet(buf,sc->fd,sc->frame & SYNC_FRAMING))>0) { */
                    while ((result = read_packet (buf, sc->fd, SYNC_FRAMING)) > 0)
                    {
                        add_payload_hdr (sc->container, sc, buf);
                        if (packet_dump)
                        {
                            do_packet_dump (buf);
                        }
                        sc->prx = sc->data_rec_seq_num;
                        if (sc->zlb_xmit)
                        {
                            deschedule (sc->zlb_xmit);
                            sc->zlb_xmit = NULL;
                        }
                        sc->tx_bytes += buf->len;
                        sc->tx_pkts++;
                        udp_xmit (buf);
                        recycle_payload (buf, sc->container->peer);
                    }
                    if (result != 0)
                    {
                        log (LOG_WARN,
                             "%s: tossing read packet, error = %s (%d).  Closing call.\n",
                             __FUNCTION__, strerror (-result), -result);
                        strcpy (sc->errormsg, strerror (-result));
                        sc->needclose = -1;
                    }
                }
                sc = sc->next;
            }
            st = st->next;
        }
    }
}