/* Wait for one received message and dispatch it to subscribed handlers.
 * Blocks on the notify pipe until the read thread has queued a packet.
 * Returns 0 on success, -1 on error. */
static int lcm_udpm_handle (lcm_udpm_t *lcm)
{
    char wakeup_byte;

    if (0 != _setup_recv_parts (lcm))
        return -1;

    /* Block on the notify pipe; the read thread writes a byte to it for
     * each packet it queues, so this wakes exactly when data is ready. */
    int nread = lcm_internal_pipe_read (lcm->notify_pipe[0], &wakeup_byte, 1);
    if (nread == 0) {
        fprintf (stderr, "Error: lcm_handle read 0 bytes from notify_pipe\n");
        return -1;
    }
    if (nread < 0) {
        fprintf (stderr, "Error: lcm_handle read: %s\n", strerror (errno));
        return -1;
    }

    /* Pull the oldest received packet off the filled queue. */
    g_static_rec_mutex_lock (&lcm->mutex);
    lcm_buf_t *packet = lcm_buf_dequeue (lcm->inbufs_filled);
    if (!packet) {
        fprintf (stderr,
                "Error: no packet available despite getting notification.\n");
        g_static_rec_mutex_unlock (&lcm->mutex);
        return -1;
    }

    /* Re-arm the notify pipe if packets remain queued, so the next call
     * wakes up immediately instead of blocking. */
    if (!lcm_buf_queue_is_empty (lcm->inbufs_filled) &&
            lcm_internal_pipe_write (lcm->notify_pipe[1], "+", 1) < 0)
        perror ("write to notify");
    g_static_rec_mutex_unlock (&lcm->mutex);

    /* Describe the payload for the handler callbacks. */
    lcm_recv_buf_t rbuf;
    rbuf.data = (uint8_t*) packet->buf + packet->data_offset;
    rbuf.data_size = packet->data_size;
    rbuf.recv_utime = packet->recv_utime;
    rbuf.lcm = lcm->lcm;

    /* Special case: while the read thread is being created (self-test
     * mode), dispatch only the self-test message. */
    int should_dispatch = !lcm->creating_read_thread ||
            !strcmp (packet->channel_name, SELF_TEST_CHANNEL);
    if (should_dispatch)
        lcm_dispatch_handlers (lcm->lcm, &rbuf, packet->channel_name);

    /* Release the payload storage and recycle the buffer struct. */
    g_static_rec_mutex_lock (&lcm->mutex);
    lcm_buf_free_data (packet, lcm->ringbuf);
    lcm_buf_enqueue (lcm->inbufs_empty, packet);
    g_static_rec_mutex_unlock (&lcm->mutex);

    return 0;
}
/* Destroy a buffer queue: release each queued buffer's payload back to
 * the given ringbuffer, free each buffer struct, then free the queue. */
void lcm_buf_queue_free (lcm_buf_queue_t * q, lcm_ringbuf_t *ringbuf)
{
    for (lcm_buf_t *node = lcm_buf_dequeue (q); node;
            node = lcm_buf_dequeue (q)) {
        lcm_buf_free_data (node, ringbuf);
        free (node);
    }
    free (q);
}
/* Destroy a buffer queue: free every queued buffer struct, then the queue
 * itself.  Payload storage is not released here. */
static void lcm_buf_queue_free (lcm_buf_queue_t * q)
{
    for (lcm_buf_t *node = lcm_buf_dequeue (q); node;
            node = lcm_buf_dequeue (q))
        free (node);
    free (q);
}
/* Allocate a buffer struct and ringbuffer space for one incoming packet.
 *
 * inbufs_empty: pool of recycled lcm_buf_t structs; refilled in batches of
 *               LCM_DEFAULT_RECV_BUFS when it runs dry.
 * ringbuf:      in/out pointer to the active ringbuffer.  If the current
 *               ringbuffer is full, it is orphaned (it cannot be freed yet
 *               because its contents are still in use) and *ringbuf is
 *               replaced with a new one 1.5x the capacity.
 *
 * Returns a buffer whose ->buf points at LCM_MAX_UNFRAGMENTED_PACKET_SIZE
 * bytes of ringbuffer storage, with the last byte zeroed so strlen on the
 * contents can never run off the end.
 *
 * Fix: the retry-allocation and terminator write previously hard-coded
 * 65536/65535 instead of LCM_MAX_UNFRAGMENTED_PACKET_SIZE; if the constant
 * ever differed from 65536, the terminator write was out of bounds. */
lcm_buf_t * lcm_buf_allocate_data(lcm_buf_queue_t * inbufs_empty, lcm_ringbuf_t **ringbuf)
{
    lcm_buf_t * lcmb = NULL;

    // first allocate a buffer struct for the packet metadata
    if (lcm_buf_queue_is_empty(inbufs_empty)) {
        // allocate additional buffer structs if needed
        int i;
        for (i = 0; i < LCM_DEFAULT_RECV_BUFS; i++) {
            lcm_buf_t * nbuf = (lcm_buf_t *) calloc(1, sizeof(lcm_buf_t));
            lcm_buf_enqueue(inbufs_empty, nbuf);
        }
    }

    lcmb = lcm_buf_dequeue(inbufs_empty);
    assert(lcmb);

    // allocate space on the ringbuffer for the packet data.
    // give it the maximum possible size for an unfragmented packet
    lcmb->buf = lcm_ringbuf_alloc(*ringbuf, LCM_MAX_UNFRAGMENTED_PACKET_SIZE);
    if (lcmb->buf == NULL) {
        // ringbuffer is full.  allocate a larger ringbuffer.
        // Can't free the old ringbuffer yet because it's in use (i.e., full)
        // Must wait until later to free it.
        assert(lcm_ringbuf_used(*ringbuf) > 0);
        dbg(DBG_LCM, "Orphaning ringbuffer %p\n", *ringbuf);

        unsigned int old_capacity = lcm_ringbuf_capacity(*ringbuf);
        unsigned int new_capacity = (unsigned int) (old_capacity * 1.5);

        // replace the passed in ringbuf with the new one
        *ringbuf = lcm_ringbuf_new(new_capacity);
        lcmb->buf = lcm_ringbuf_alloc(*ringbuf, LCM_MAX_UNFRAGMENTED_PACKET_SIZE);
        assert(lcmb->buf);

        dbg(DBG_LCM, "Allocated new ringbuffer size %u\n", new_capacity);
    }
    // save a pointer to the ringbuf, in case it gets replaced by another call
    lcmb->ringbuf = *ringbuf;

    // zero the last byte so that strlen never segfaults
    lcmb->buf[LCM_MAX_UNFRAGMENTED_PACKET_SIZE - 1] = 0;
    return lcmb;
}
/* Read-thread worker: loop on select()/recvmsg() until one complete LCM
 * message has been assembled (a short message, or the final fragment of a
 * long one), then return its buffer.  Returns NULL when an exit command
 * arrives on thread_msg_pipe.  Also maintains packet-loss statistics and
 * prints a loss report at most every ~2 seconds. */
static lcm_buf_t *
udp_read_packet (lcm_udpm_t *lcm)
{
    lcm_buf_t *lcmb = NULL;
    int sz = 0;

    /* Track the low-water mark of ringbuffer availability since the last
     * report, as a fraction of capacity. */
    g_static_rec_mutex_lock (&lcm->mutex);
    double buf_avail = lcm_ringbuf_available(lcm->ringbuf);
    g_static_rec_mutex_unlock (&lcm->mutex);
    if (buf_avail < lcm->udp_low_watermark)
        lcm->udp_low_watermark = buf_avail;

    /* Periodic loss report: only printed if packets were discarded or the
     * ringbuffer dropped below half free since the last report. */
    GTimeVal tv;
    g_get_current_time(&tv);
    int elapsedsecs = tv.tv_sec - lcm->udp_last_report_secs;
    if (elapsedsecs > 2) {
        uint32_t total_bad = lcm->udp_discarded_lcmb +
                             lcm->udp_discarded_buf +
                             lcm->udp_discarded_bad;
        if (total_bad > 0 || lcm->udp_low_watermark < 0.5) {
            fprintf(stderr,
                    "%d.%03d LCM loss %4.1f%% : %5d lcmb, %5d buf, %5d err, "
                    "buf avail %4.1f%%\n",
                    (int) tv.tv_sec, (int) tv.tv_usec/1000,
                    total_bad * 100.0 / (lcm->udp_rx + total_bad),
                    lcm->udp_discarded_lcmb,
                    lcm->udp_discarded_buf,
                    lcm->udp_discarded_bad,
                    100.0 * lcm->udp_low_watermark);

            /* reset counters for the next reporting window */
            lcm->udp_rx = 0;
            lcm->udp_discarded_lcmb = 0;
            lcm->udp_discarded_buf = 0;
            lcm->udp_discarded_bad = 0;
            lcm->udp_last_report_secs = tv.tv_sec;
            lcm->udp_low_watermark = HUGE;
        }
    }

    int got_complete_message = 0;

    while (!got_complete_message) {
        // wait for either incoming UDP data, or for an abort message
        fd_set fds;
        FD_ZERO (&fds);
        FD_SET (lcm->recvfd, &fds);
        FD_SET (lcm->thread_msg_pipe[0], &fds);
        SOCKET maxfd = MAX(lcm->recvfd, lcm->thread_msg_pipe[0]);

        if (select (maxfd + 1, &fds, NULL, NULL, NULL) <= 0) {
            perror ("udp_read_packet -- select:");
            continue;
        }

        if (FD_ISSET (lcm->thread_msg_pipe[0], &fds)) {
            // received an exit command.
            dbg (DBG_LCM, "read thread received exit command\n");
            if (lcmb) {
                // lcmb is not on one of the memory managed buffer queues.  We could
                // either put it back on one of the queues, or just free it here.  Do the
                // latter.
                //
                // Can also just free its lcm_buf_t here.  Its data buffer is
                // managed either by the ring buffer or the fragment buffer, so
                // we can ignore it.
                free (lcmb);
            }
            return NULL;
        }

        // there is incoming UDP data ready.
        assert (FD_ISSET (lcm->recvfd, &fds));

        if (!lcmb) {
            // try to allocate space on the ringbuffer for the new data
            // first allocate a buffer struct
            g_static_rec_mutex_lock (&lcm->mutex);
            lcmb = lcm_buf_dequeue (lcm->inbufs_empty);
            if (!lcmb) {
                /* no free buffer structs: drain and drop this datagram so
                 * select() does not spin on it. */
                g_static_rec_mutex_unlock (&lcm->mutex);
                udp_discard_packet (lcm);
                continue;
            }
            lcmb->buf_from_ringbuf = 1;

            // next allocate space on the ringbuffer.
            // give it the maximum possible size for an unfragmented packet
            lcmb->buf = lcm_ringbuf_alloc(lcm->ringbuf, 65536);
            g_static_rec_mutex_unlock (&lcm->mutex);
            if (!lcmb->buf) {
                // ringbuffer is full.  discard the packet, put lcmb back on the
                // empty queue, and start waiting again
                udp_discard_packet (lcm);
                lcm_buf_enqueue (lcm->inbufs_empty, lcmb);
                lcmb = NULL;
                continue;
            }

            // zero the last byte so that strlen never segfaults
            lcmb->buf[65535] = 0;
        }

        /* receive at most 65535 bytes, leaving the final (zeroed) byte of
         * the 65536-byte allocation as a guaranteed terminator. */
        struct iovec vec;
        vec.iov_base = lcmb->buf;
        vec.iov_len = 65535;

#ifdef MSG_EXT_HDR
        // operating systems that provide SO_TIMESTAMP allow us to obtain more
        // accurate timestamps by having the kernel produce timestamps as soon
        // as packets are received.
        char controlbuf[64];
#endif
        struct msghdr msg;
        msg.msg_name = &lcmb->from;
        msg.msg_namelen = sizeof (struct sockaddr);
        msg.msg_iov = &vec;
        msg.msg_iovlen = 1;
#ifdef MSG_EXT_HDR
        msg.msg_control = controlbuf;
        msg.msg_controllen = sizeof (controlbuf);
        msg.msg_flags = 0;
#endif
        sz = recvmsg (lcm->recvfd, &msg, 0);

        if (sz < 0) {
            perror ("udp_read_packet -- recvmsg");
            lcm->udp_discarded_bad++;
            continue;
        }

        if (sz < sizeof(lcm2_header_short_t)) {
            // packet too short to be LCM
            lcm->udp_discarded_bad++;
            continue;
        }

        lcmb->fromlen = msg.msg_namelen;

        int got_utime = 0;
#ifdef SO_TIMESTAMP
        struct cmsghdr * cmsg = CMSG_FIRSTHDR (&msg);
        /* Get the receive timestamp out of the packet headers if possible */
        while (!lcmb->recv_utime && cmsg) {
            if (cmsg->cmsg_level == SOL_SOCKET &&
                    cmsg->cmsg_type == SCM_TIMESTAMP) {
                // GTimeVal is identical to struct timeval, so this cast is ok
                GTimeVal * t = (GTimeVal*) CMSG_DATA (cmsg);
                lcmb->recv_utime = (int64_t) t->tv_sec * 1000000 + t->tv_usec;
                got_utime = 1;
                break;
            }
            cmsg = CMSG_NXTHDR (&msg, cmsg);
        }
#endif
        if (!got_utime)
            /* fall back to a userspace timestamp */
            lcmb->recv_utime = _timestamp_now ();

        /* Dispatch on the wire-format magic: short messages complete in one
         * datagram; long messages complete only when the last fragment
         * arrives (the helpers return nonzero when a full message is ready). */
        lcm2_header_short_t *hdr2 = (lcm2_header_short_t*) lcmb->buf;
        uint32_t rcvd_magic = ntohl(hdr2->magic);
        if (rcvd_magic == LCM2_MAGIC_SHORT)
            got_complete_message = _recv_short_message (lcm, lcmb, sz);
        else if (rcvd_magic == LCM2_MAGIC_LONG)
            got_complete_message = _recv_message_fragment (lcm, lcmb, sz);
        else {
            dbg (DBG_LCM, "LCM: bad magic\n");
            lcm->udp_discarded_bad++;
            continue;
        }
    }

    // if the newly received packet is a short packet, then resize the space
    // allocated to it on the ringbuffer to exactly match the amount of space
    // required.  That way, we do not use 64k of the ringbuffer for every
    // incoming message.
    if (lcmb->buf_from_ringbuf) {
        g_static_rec_mutex_lock (&lcm->mutex);
        lcm_ringbuf_shrink_last(lcm->ringbuf, lcmb->buf, sz);
        g_static_rec_mutex_unlock (&lcm->mutex);
    }

    return lcmb;
}
/* Wait for one received message and dispatch it to subscribed handlers.
 * Starts the read thread on first use.  Blocks on the notify pipe until a
 * packet is available.  Returns 0 on success, -1 on error. */
static int lcm_udpm_handle (lcm_udpm_t *lcm)
{
    char wakeup_byte;

    if (!lcm->thread_created) {
        if (0 != _setup_recv_thread (lcm))
            return -1;
    }

    /* Block on the notify pipe; the read thread writes a byte per packet,
     * so this wakes exactly when data is ready. */
    int nread = lcm_internal_pipe_read (lcm->notify_pipe[0], &wakeup_byte, 1);
    if (nread == 0) {
        fprintf (stderr, "Error: lcm_handle read 0 bytes from notify_pipe\n");
        return -1;
    }
    if (nread < 0) {
        fprintf (stderr, "Error: lcm_handle read: %s\n", strerror (errno));
        return -1;
    }

    /* Pull the oldest received packet off the filled queue. */
    g_static_rec_mutex_lock (&lcm->mutex);
    lcm_buf_t *packet = lcm_buf_dequeue (lcm->inbufs_filled);
    if (!packet) {
        fprintf (stderr,
                "Error: no packet available despite getting notification.\n");
        g_static_rec_mutex_unlock (&lcm->mutex);
        return -1;
    }

    /* Re-arm the notify pipe if packets remain queued, so the next call
     * wakes up immediately instead of blocking. */
    if (!is_buf_queue_empty (lcm->inbufs_filled) &&
            lcm_internal_pipe_write (lcm->notify_pipe[1], "+", 1) < 0)
        perror ("write to notify");
    g_static_rec_mutex_unlock (&lcm->mutex);

    /* Describe the payload for the handler callbacks. */
    lcm_recv_buf_t rbuf;
    rbuf.data = (uint8_t*) packet->buf + packet->data_offset;
    rbuf.data_size = packet->data_size;
    rbuf.recv_utime = packet->recv_utime;
    rbuf.lcm = lcm->lcm;

    lcm_dispatch_handlers (lcm->lcm, &rbuf, packet->channel_name);

    /* Release the payload (ringbuffer slot or heap block) and recycle the
     * buffer struct. */
    g_static_rec_mutex_lock (&lcm->mutex);
    if (packet->buf_from_ringbuf)
        lcm_ringbuf_dealloc (lcm->ringbuf, packet->buf);
    else
        free (packet->buf);
    packet->buf = NULL;
    packet->buf_size = 0;
    lcm_buf_enqueue (lcm->inbufs_empty, packet);
    g_static_rec_mutex_unlock (&lcm->mutex);

    return 0;
}