Example #1
int
buffer_add_printf(buffer_t *buf, const char *fmt, ...)
{
	char *ptr;
	int   left;
	int   n;
	va_list ap;

	buffer_align(buf);
	ptr = buf->rptr + buf->off;
	left = buf->totallen - buf->off - buf->misalign;
	va_start(ap, fmt);
	n = vsnprintf(ptr, left, fmt, ap);
	va_end(ap);

	if (n >= left || n == -1) {
		if (buffer_expand(buf, 4096) == -1)
			return (-1);

		ptr = buf->rptr + buf->off;
		left = buf->totallen - buf->off - buf->misalign;
		va_start(ap, fmt);
		n = vsnprintf(ptr, left, fmt, ap);
		va_end(ap);
	}

	return n;
}
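
Example #1 grows the buffer and retries once when vsnprintf() reports truncation (n >= left) or an error (n == -1). None of these snippets include the buffer_t definition itself; the sketch below is only an assumed layout that would be consistent with the rptr/off/misalign/totallen accesses in Examples #1-#3 and #5.

#include <stddef.h>

/*
 * Hypothetical buffer_t layout; the real definition is not part of these
 * examples, so the fields and their meanings are assumptions.  Free space
 * at the write position (rptr + off) is then totallen - off - misalign,
 * as computed in Example #1.
 */
typedef struct buffer {
	char   *origptr;   /* start of the allocation              */
	char   *rptr;      /* read pointer: origptr + misalign     */
	size_t  misalign;  /* bytes already consumed at the front  */
	size_t  off;       /* bytes of valid data starting at rptr */
	size_t  totallen;  /* total size of the allocation         */
} buffer_t;
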
Example #2
/*
 * Reads data from a file descriptor into a buffer.
 */
int
buffer_read(buffer_t *buf, int fd)
{
	char *p;
	int   n;

	if (ioctl(fd, FIONREAD, &n) < 0) {
		DBG("--- ioctl:FIONREAD error ---\n");
		return (-1);
	}

	if (n > 0) {
		if (buffer_expand(buf, n) == -1) {
			return (-1);
		}

		p = buf->rptr + buf->off;
		n = read(fd, p, n);
		if (n > 0) {
			buf->off += n;
		}
	}
	return n;
}
Example #3
int
buffer_add(buffer_t *buf, const void *data, size_t datlen)
{
	size_t need = buf->misalign + buf->off + datlen;

	if (buf->totallen < need) {
		if (buffer_expand(buf, datlen) == -1)
			return (-1);
	}

	memcpy(buf->rptr + buf->off, data, datlen);
	buf->off += datlen;

	return (0);
}
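
Examples #1-#3 and #5 all rely on buffer_expand() to guarantee room for the incoming bytes before writing. Its implementation is not included on this page; the following is a minimal sketch of what an implementation compatible with those call sites might look like (grow the allocation until misalign + off + datlen fits), written against the assumed buffer_t layout above.

#include <stdlib.h>

/*
 * Minimal sketch of buffer_expand(): ensure there is room for at least
 * `datlen` more bytes after the current data.  This is an assumption about
 * the API, not the project's actual implementation.
 */
int
buffer_expand(buffer_t *buf, size_t datlen)
{
	size_t need = buf->misalign + buf->off + datlen;
	size_t newlen;
	char  *newptr;

	if (buf->totallen >= need)
		return (0);            /* already enough space */

	/* grow geometrically to amortize reallocation cost */
	newlen = buf->totallen ? buf->totallen : 64;
	while (newlen < need)
		newlen <<= 1;

	newptr = realloc(buf->origptr, newlen);
	if (newptr == NULL)
		return (-1);

	buf->origptr = newptr;
	buf->rptr = newptr + buf->misalign;
	buf->totallen = newlen;
	return (0);
}
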
Example #4
static void
read_cb(int fd, void *arg)
{
    struct argos_net_conn *conn = arg;

    /*
     * We only want to do reads if conn->state == ARGOS_NET_CONN_CONNECTED, but
     * we don't assert() this because it's possible for this socket to be
     * selected simultaneously for both a read and a write and then for our
     * state to change during the write attempt.
     */
    if (conn->state != ARGOS_NET_CONN_CONNECTED)
        return;

    ssize_t len = recv(conn->sock, buffer_tail(conn->inbuf),
        buffer_remaining(conn->inbuf), 0);
    if (len == -1) {
        if (IS_NETWORK_ERROR(errno)) {
            /* network error; reset our connection */
            orion_log_warn_errno("recv");
            reset_connection(conn, 0);
        } else if (errno == EINTR) {
            /* don't care; ignore it */
        } else {
            /* anything else is a fatal error */
            orion_log_crit_errno("recv");
            kill_connection(conn);
            orion_log_crit("unexpected recv() error; connection is now dead");
        }
    } else if (len == 0) {
        /* EOF received (maybe other end is shutting down?) */
        orion_log_info("EOF received from remote end - closing socket");
        if (buffer_len(conn->inbuf) > 0)
            orion_log_warn("incomplete message received (inbuflen=%d)",
                buffer_len(conn->inbuf));
        reset_connection(conn, 1 /* flush buffers */);
    } else {
        /* ok, we read some data into the inbuf; update the buffer */
        int rv = buffer_expand(conn->inbuf, len);
        if (rv == -1) KABOOM("buffer_expand");

        conn->bytes_recv += len;

        /* now process (i.e. look for complete messages in) the inbuf */
        process_inbuf(conn);
    }
}
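
Example #4 is from a different codebase (Argos) with a different buffer API: recv() writes directly into the tail of the buffer, and buffer_expand() is then called with the number of bytes actually read, i.e. here it commits bytes already written at the tail rather than growing the allocation as in the earlier examples. The helpers below sketch semantics that would match some of the call sites in Examples #4 and #7; only the names appear in the snippets, so the bodies are assumptions.

#include <stddef.h>

/* Assumed Argos-style buffer: a flat array with a fill level. */
typedef struct argos_buffer {
	unsigned char *data;   /* backing storage      */
	size_t         size;   /* total capacity       */
	size_t         len;    /* bytes currently held */
} argos_buffer_t;

static unsigned char *buffer_head(argos_buffer_t *b)      { return b->data; }          /* first valid byte  */
static unsigned char *buffer_tail(argos_buffer_t *b)      { return b->data + b->len; } /* first free byte   */
static size_t         buffer_len(argos_buffer_t *b)       { return b->len; }           /* valid bytes       */
static size_t         buffer_remaining(argos_buffer_t *b) { return b->size - b->len; } /* free bytes at tail */

/* "expand" in this API: account for n bytes just written into the tail */
static int
buffer_expand(argos_buffer_t *b, size_t n)
{
	if (n > b->size - b->len)
		return -1;
	b->len += n;
	return 0;
}
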
Example #5
int
buffer_readn(buffer_t *buf, int fd, int n)
{
	char *p;

	if (buffer_expand(buf, n) == -1) {
		return (-1);
	}

	p = buf->rptr + buf->off;
	n = read(fd, p, n);
	if (n > 0) {
		buf->off += n;
	}

	return n;
}
Example #6
int tcpclient_sendall(tcpclient_t *client, const char *buf, size_t len) {
	buffer_t *sendq = &client->send_queue;

	if (client->addr == NULL) {
		stats_error_log("tcpclient[%s]: Cannot send before connect!", client->name);
		return 1;
	} else {
		// Does nothing if we're already connected, triggers a
		// reconnect if backoff has expired.
		tcpclient_connect(client);
	}

	if (buffer_datacount(&client->send_queue) >= client->config->max_send_queue) {
		if (client->failing == 0) {
			stats_error_log("tcpclient[%s]: send queue for %s client is full (at %zd bytes, max is %" PRIu64 " bytes), dropping data",
					client->name,
					tcpclient_state_name[client->state],
					buffer_datacount(&client->send_queue),
					client->config->max_send_queue);
			client->failing = 1;
		}
		return 2;
	}
	if (buffer_spacecount(sendq) < len) {
		if (buffer_realign(sendq) != 0) {
			stats_error_log("tcpclient[%s]: Unable to realign send queue", client->name);
			return 3;
		}
	}
	while (buffer_spacecount(sendq) < len) {
		if (buffer_expand(sendq) != 0) {
			stats_error_log("tcpclient[%s]: Unable to allocate additional memory for send queue, dropping data", client->name);
			return 4;
		}
	}
	memcpy(buffer_tail(sendq), buf, len);
	buffer_produced(sendq, len);

	if (client->state == STATE_CONNECTED) {
		client->write_watcher.started = true;
		ev_io_start(client->loop, &client->write_watcher.watcher);
	}
	return 0;
}
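
tcpclient_sendall() signals distinct failure modes through its return value: 0 when the data was queued, 1 when called before connect, 2 when the send queue is full (data dropped), 3 when the queue could not be realigned, and 4 when it could not be grown. A hypothetical caller might distinguish them as below; send_metric() and its arguments are illustrative only and not part of the original code.

/*
 * Hypothetical caller illustrating the return codes of tcpclient_sendall()
 * as they appear in Example #6.
 */
static void
send_metric(tcpclient_t *client, const char *line, size_t len)
{
	switch (tcpclient_sendall(client, line, len)) {
	case 0:
		/* queued; the write watcher will flush it once connected */
		break;
	case 2:
		/* send queue full: data was dropped, producer should back off */
		break;
	default:
		/* 1, 3 or 4: not connected yet or out of memory; data dropped */
		break;
	}
}
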
Example #7
static int
compress_and_xfer(struct argos_net_conn *conn, u_char force)
{
    /*
     * if the packet-buf is empty, quit right off; this check is not necessary
     * for correctness, but it's nice to check it early for efficiency and also
     * to avoid some spam in the logs (e.g. lots of 'compression timeout'
     * messages) since this case is quite common.
     */
    if (buffer_len(conn->pktbuf) == 0)
        return 0;

#if ARGOS_NET_USE_COMPRESSION == ARGOS_NET_COMPRESS_NONE
    size_t to_xfer = min(buffer_len(conn->pktbuf),
        buffer_remaining(conn->outbuf));

    if (to_xfer == 0) return 0;

#if ARGOS_NET_TRACE_IO
    struct timeval start;
    if (gettimeofday(&start, NULL) != 0) {
        orion_log_crit_errno("gettimeofday");
        return 0;
    }
#endif /* #if ARGOS_NET_TRACE_IO */
    
    memcpy(buffer_tail(conn->outbuf), buffer_head(conn->pktbuf), to_xfer);

#if ARGOS_NET_TRACE_IO
    struct timeval end;
    if (gettimeofday(&end, NULL) != 0) {
        orion_log_crit_errno("gettimeofday");
        return 0;
    }

    struct timeval elapsed;
    orion_time_subtract(&end, &start, &elapsed);

    float elapsed_msec = elapsed.tv_sec*1000 + (float)elapsed.tv_usec/1000;

    orion_log_debug("memcpy'ed %u bytes in %.2f ms (%.2f MB/s)",
        to_xfer, elapsed_msec, ((to_xfer/elapsed_msec)*1000)/(1024*1024));
#endif /* #if ARGOS_NET_TRACE_IO */

    if (buffer_expand(conn->outbuf, to_xfer) < 0)
        KABOOM("buffer_expand");

    if (buffer_discard(conn->pktbuf, to_xfer) < 0)
        KABOOM("buffer_discard");

#else  /* #if ARGOS_NET_USE_COMPRESSION == ARGOS_NET_COMPRESS_NONE  */

    /*
     * In general, we want large blocks of packets to compress (because that
     * makes the compression more space-efficient).  So if there isn't much data
     * in pktbuf, just return without doing anything.  Note that we also need to
     * check to make sure there is enough room in the outbuf for us to put the
     * packets once they are compressed.
     */
    size_t minlen;

    if (force) {
        minlen = 1;
    } else {
        /*
         * If this conn has a small pktbuf (such that even when it is totally
         * full, the COMPRESS_HARD_MIN won't be met), then we need to adjust to
         * a limit that is actually attainable; we use 75% of the buffer size.
         */
        minlen = (3*buffer_size(conn->pktbuf))/4;

        /* usually, this is the value that we end up with for minlen: */
        minlen = min(minlen, COMPRESS_HARD_MIN);

        /*
         * one more special case: if argos_net_shutdown() was called on this
         * connection then there is no minimum compression size - we just want
         * to drain the buffers no matter how much is in there
         */
        if (conn->shutdown) minlen = 1;
    }

    /* quit if we don't have at least 'minlen' bytes of packet data to compress */
    if (buffer_len(conn->pktbuf) < minlen)
        return 0;

    /* check the total space available in the connection outbuf */
    size_t total_space = buffer_remaining(conn->outbuf);
    if (total_space < sizeof(struct argos_net_compress_msg))
        return 0;  /* not enough space available */

    /* this is the total space available for the compressed data */
    size_t space = total_space - sizeof(struct argos_net_compress_msg);

    /* don't exceed the maximum compression-block size */
    space = min(space, ARGOS_NET_MAX_COMPRESS_LEN);

    /*
     * given the space available, calculate how much packet data we can safely
     * consume (considering worst-case input:output size ratios for whatever
     * compression algorithm we are using).
     */
    ssize_t ok_to_consume;

#if ARGOS_NET_USE_COMPRESSION == ARGOS_NET_COMPRESS_LZO
    /* this is the inversion of the function given in the LZO FAQ file */
    ok_to_consume = (16*(space - 64 - 3))/17;
#elif ARGOS_NET_USE_COMPRESSION == ARGOS_NET_COMPRESS_QUICKLZ
    /* this is the inversion of the function given in the QuickLZ manual */
    ok_to_consume = space - 400;
#else
    #error "unknown value for ARGOS_NET_USE_COMPRESSION"
#endif

    if (ok_to_consume <= 0) return 0;  /* not enough space available */

    /* number of bytes that will actually be compressed and transferred */
    size_t readlen = min(ok_to_consume, buffer_len(conn->pktbuf));
    assert(readlen > 0);

    /* don't exceed the maximum compression-block size */
    readlen = min(readlen, COMPRESS_HARD_MAX);

    /* where the compressed data should be written */
    u_char *write_ptr = buffer_tail(conn->outbuf) +
        sizeof(struct argos_net_compress_msg);

    struct argos_net_compress_msg *msg =
        (struct argos_net_compress_msg*)buffer_tail(conn->outbuf);
    msg->msgtype = htons(ARGOS_NET_COMPRESS_MSGTYPE);
    /* have to defer filling in msglen field */
    msg->algorithm = ARGOS_NET_USE_COMPRESSION;
    msg->orig_len = htonl(readlen);

#if ARGOS_NET_TRACE_IO
    /* measure the elapsed process time to try to detect cpu starvation */
    struct itimerval itimer_start;
    bzero(&itimer_start, sizeof(itimer_start));
    itimer_start.it_value.tv_sec = 100;  /* arbitrary large value */

    struct timeval start;
    if (gettimeofday(&start, NULL) != 0) {
        orion_log_crit_errno("gettimeofday");
        return 0;
    }

    if (setitimer(ITIMER_PROF, &itimer_start, NULL) != 0) {
        orion_log_crit_errno("setitimer");
        return 0;
    }
#endif /* #if ARGOS_NET_TRACE_IO */

#if ARGOS_NET_USE_COMPRESSION == ARGOS_NET_COMPRESS_LZO
    lzo_uint lzo_outlen;
    int rv = lzo1x_1_compress(buffer_head(conn->pktbuf), readlen,
        write_ptr, &lzo_outlen, conn->lzo_wrk_space);
    if (rv != LZO_E_OK) {
        /* according to LZO documentation "this should NEVER happen" */
        orion_log_crit("LZO compression library internal error: %d", rv);
        return 0;
    }

    uint32_t outlen = lzo_outlen;

#elif ARGOS_NET_USE_COMPRESSION == ARGOS_NET_COMPRESS_QUICKLZ
    uint32_t outlen = qlz_compress(buffer_head(conn->pktbuf), (char*)write_ptr, readlen, conn->qlz_scratch);
#else
    #error "unknown value for ARGOS_NET_USE_COMPRESSION"
#endif

#if ARGOS_NET_TRACE_IO
    /* call this before gettimeofday */
    struct itimerval itimer_end;
    if (getitimer(ITIMER_PROF, &itimer_end) != 0) {
        orion_log_crit_errno("getitimer");
        return 0;
    }
#endif /* #if ARGOS_NET_TRACE_IO */

    struct timeval end;
    if (gettimeofday(&end, NULL) != 0) {
        orion_log_crit_errno("gettimeofday");
        return 0;
    }

#if ARGOS_NET_TRACE_IO
    struct timeval real_elapsed;
    orion_time_subtract(&end, &start, &real_elapsed);

    float real_msec = real_elapsed.tv_sec*1000 +
        (float)real_elapsed.tv_usec/1000;

    struct timeval process_elapsed;
    orion_time_subtract(&itimer_start.it_value, &itimer_end.it_value,
        &process_elapsed);

    float process_msec = process_elapsed.tv_sec*1000 +
        (float)process_elapsed.tv_usec/1000;

    orion_log_debug("compressed %u bytes to %u (%.2f%%) in %.2f ms"
        " (%.2f MB/s); %.2f ms process time", readlen, outlen,
        (float)outlen*100/readlen, real_msec,
        ((readlen/real_msec)*1000)/(1024*1024), process_msec);
#endif /* #if ARGOS_NET_TRACE_IO */

    size_t total_len = sizeof(struct argos_net_compress_msg) + outlen;
    if (buffer_expand(conn->outbuf, total_len) < 0)
        KABOOM("buffer_expand");

    if (buffer_discard(conn->pktbuf, readlen) < 0)
        KABOOM("buffer_discard");

    /* write back into the msglen field now that we know the total length */
    msg->msglen = htonl(sizeof(struct argos_net_compress_msg) + outlen);

    /* check for incompressible block (this is normal for small blocks) */
    if (outlen > readlen) {
        if (readlen < 4096)
            orion_log_debug("incompressible block: inlen=%zu, outlen=%u",
                readlen, outlen);
        else
            orion_log_warn("incompressible block: inlen=%zu, outlen=%u",
                readlen, outlen);
    }
#endif  /* #if ARGOS_NET_USE_COMPRESSION == ARGOS_NET_COMPRESS_NONE  */

    /* cancel any currently scheduled compression timeout event */
    if (conn->compress_evt_reg != NULL) {
        if (async_cancel(conn->compress_evt_reg) != 0)
            orion_log_crit_errno("async_cancel");
        conn->compress_evt_reg = NULL;
    }

    return 1;
}
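
The compression path in Example #7 writes a struct argos_net_compress_msg header directly into the outbuf and fills the msglen field in after the compressed size is known. The struct definition is not part of the snippet; a wire layout consistent with the fields the code assigns (msgtype via htons, algorithm, orig_len and msglen via htonl) could look like the sketch below, where the field widths and padding are assumptions.

#include <stdint.h>

/*
 * Hypothetical layout for struct argos_net_compress_msg, reconstructed from
 * the assignments in Example #7.  Multi-byte fields are sent in network byte
 * order (htons/htonl); widths and the padding byte are assumptions.
 */
struct argos_net_compress_msg {
	uint16_t msgtype;    /* ARGOS_NET_COMPRESS_MSGTYPE                  */
	uint8_t  algorithm;  /* ARGOS_NET_USE_COMPRESSION value             */
	uint8_t  pad;        /* assumed padding for 32-bit alignment        */
	uint32_t msglen;     /* header + compressed payload, filled in last */
	uint32_t orig_len;   /* uncompressed payload length                 */
} __attribute__((packed));
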