Example #1
static void
net_rx_pkt_pull_data(struct NetRxPkt *pkt,
                        const struct iovec *iov, int iovcnt,
                        size_t ploff)
{
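    /* Gather the frame into pkt->vec (re-inserting the saved Ethernet header
     * if a VLAN tag was stripped), then parse its L3/L4 headers. */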
    if (pkt->vlan_stripped) {
        net_rx_pkt_iovec_realloc(pkt, iovcnt + 1);

        pkt->vec[0].iov_base = pkt->ehdr_buf;
        pkt->vec[0].iov_len = sizeof(pkt->ehdr_buf);

        pkt->tot_len =
            iov_size(iov, iovcnt) - ploff + sizeof(struct eth_header);

        pkt->vec_len = iov_copy(pkt->vec + 1, pkt->vec_len_total - 1,
                                iov, iovcnt, ploff, pkt->tot_len);
    } else {
        net_rx_pkt_iovec_realloc(pkt, iovcnt);

        pkt->tot_len = iov_size(iov, iovcnt) - ploff;
        pkt->vec_len = iov_copy(pkt->vec, pkt->vec_len_total,
                                iov, iovcnt, ploff, pkt->tot_len);
    }

    eth_get_protocols(pkt->vec, pkt->vec_len, &pkt->isip4, &pkt->isip6,
                      &pkt->isudp, &pkt->istcp,
                      &pkt->l3hdr_off, &pkt->l4hdr_off, &pkt->l5hdr_off,
                      &pkt->ip6hdr_info, &pkt->ip4hdr_info, &pkt->l4hdr_info);

    trace_net_rx_pkt_parsed(pkt->isip4, pkt->isip6, pkt->isudp, pkt->istcp,
                            pkt->l3hdr_off, pkt->l4hdr_off, pkt->l5hdr_off);
}
Example #2
static int filter_send(CharBackend *chr_out,
                       const struct iovec *iov,
                       int iovcnt)
{
    int ret = 0;
    ssize_t size = 0;
    uint32_t len = 0;
    char *buf;

    size = iov_size(iov, iovcnt);
    if (!size) {
        return 0;
    }

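    /* The packet is framed as a 4-byte network-order length followed by the payload. */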
    len = htonl(size);
    ret = qemu_chr_fe_write_all(chr_out, (uint8_t *)&len, sizeof(len));
    if (ret != sizeof(len)) {
        goto err;
    }

    buf = g_malloc(size);
    iov_to_buf(iov, iovcnt, 0, buf, size);
    ret = qemu_chr_fe_write_all(chr_out, (uint8_t *)buf, size);
    g_free(buf);
    if (ret != size) {
        goto err;
    }

    return 0;

err:
    return ret < 0 ? ret : -EIO;
}
Example #3
File: dump.c  Project: jjykh/qemu
static ssize_t dump_receive_iov(DumpState *s, const struct iovec *iov, int cnt)
{
    struct pcap_sf_pkthdr hdr;
    int64_t ts;
    int caplen;
    size_t size = iov_size(iov, cnt);
    struct iovec dumpiov[cnt + 1];

    /* Early return in case of previous error. */
    if (s->fd < 0) {
        return size;
    }

    ts = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL);
    caplen = size > s->pcap_caplen ? s->pcap_caplen : size;

    hdr.ts.tv_sec = ts / 1000000 + s->start_ts;
    hdr.ts.tv_usec = ts % 1000000;
    hdr.caplen = caplen;
    hdr.len = size;

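    /* Prepend the pcap record header, then copy at most caplen bytes of the packet. */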
    dumpiov[0].iov_base = &hdr;
    dumpiov[0].iov_len = sizeof(hdr);
    cnt = iov_copy(&dumpiov[1], cnt, iov, cnt, 0, caplen);

    if (writev(s->fd, dumpiov, cnt + 1) != sizeof(hdr) + caplen) {
        error_report("network dump write error - stopping dump");
        close(s->fd);
        s->fd = -1;
    }

    return size;
}
Example #4
File: lz4.c  Project: raedwulf/dillsocks
static int lz4_msendv(struct msock_vfs *mvfs,
      const struct iovec *iov, size_t iovlen, int64_t deadline) {
    struct lz4_sock *obj = dsock_cont(mvfs, struct lz4_sock, mvfs);
    /* Adjust the buffer size as needed. */
    size_t len = iov_size(iov, iovlen);
    size_t maxlen = LZ4F_compressFrameBound(len, NULL);
    if(obj->outlen < maxlen) {
        uint8_t *newbuf = realloc(obj->outbuf, maxlen);
        if(dsock_slow(!newbuf)) {errno = ENOMEM; return -1;}
        obj->outbuf = newbuf;
        obj->outlen = maxlen;
    }
    /* Compress the data. */
    /* TODO: Avoid the extra allocations and copies. */
    uint8_t *buf = malloc(len);
    if(dsock_slow(!buf)) {errno = ENOMEM; return -1;}
    iov_copyallfrom(buf, iov, iovlen);
    LZ4F_preferences_t prefs = {0};
    prefs.frameInfo.contentSize = len;
    size_t dstlen = LZ4F_compressFrame(obj->outbuf, obj->outlen,
        buf, len, &prefs);
    dsock_assert(!LZ4F_isError(dstlen));
    dsock_assert(dstlen <= obj->outlen);
    free(buf);
    /* Send the compressed frame. */
    return msend(obj->s, obj->outbuf, dstlen, deadline);
}
Example #5
/* filter APIs */
static ssize_t filter_buffer_receive_iov(NetFilterState *nf,
                                         NetClientState *sender,
                                         unsigned flags,
                                         const struct iovec *iov,
                                         int iovcnt,
                                         NetPacketSent *sent_cb)
{
    FilterBufferState *s = FILTER_BUFFER(nf);

    /*
     * We return the size when we buffer a packet; the sender will treat
     * it as an already sent packet, so sent_cb should not be called later.
     *
     * FIXME: Even if the guest can't receive packets for some reason,
     * the filter can still accept packets until its internal queue is full.
     * For example: if the receiver cannot accept more packets
     * (.can_receive() returns zero), then without a filter at most one
     * packet will be queued in the incoming queue and the sender's poll
     * will be disabled until its sent_cb() is called. With a filter, it
     * keeps receiving packets without regard for the receiver. This is
     * suboptimal and may need more thought (e.g. keeping sent_cb).
     */
    qemu_net_queue_append_iov(s->incoming_queue, sender, flags,
                              iov, iovcnt, NULL);
    return iov_size(iov, iovcnt);
}
Example #6
/*
 * Handler for control c_ovq virtqueue.
 * Called when guest sends a control message to the host.
 */
static void control_out(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtQueueElement elem;
    VirtIOCrypto *crdev;
    uint8_t *buf;
    size_t len;

    FUNC_IN;
    crdev = DO_UPCAST(VirtIOCrypto, vdev, vdev);

    len = 0;
    buf = NULL;

    if (!virtqueue_pop(vq, &elem)) {
        return;
    }

    /* Flatten the guest's scatter-gather buffers into one contiguous buffer. */
    len = iov_size(elem.out_sg, elem.out_num);
    buf = g_malloc(len);
    iov_to_buf(elem.out_sg, elem.out_num, 0, buf, len);

    handle_control_message(crdev, buf, len);

    virtqueue_push(vq, &elem, 0);
    virtio_notify(vdev, vq);

    g_free(buf);
    FUNC_OUT;
}
Example #7
static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
                                          struct iovec **piov,
                                          unsigned int *pniov,
                                          size_t size)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
    int num;
    size_t buf_size;

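    /* Rebuild the two-element scatter-gather list mapping the guest's reply buffers for this request. */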
    g_free(ring->sg);

    ring->sg = g_new0(struct iovec, 2);
    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);

    buf_size = iov_size(ring->sg, num);
    if (buf_size < size) {
        xen_pv_printf(&xen_9pfs->xendev, 0, "Xen 9pfs request type %d "
                "needs %zu bytes, buffer has %zu\n", pdu->id, size,
                buf_size);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }

    *piov = ring->sg;
    *pniov = num;
}
Example #8
/**
 * Flushes the QEMUFile buffer
 *
 * If the QEMUFileOps provide a writev_buffer hook it is used; otherwise
 * the put_buffer hook is used. This flushes all pending data. If the data
 * was only partially flushed, an error state is set.
 */
void qemu_fflush(QEMUFile *f)
{
    ssize_t ret = 0;
    ssize_t expect = 0;

    if (!qemu_file_is_writable(f)) {
        return;
    }

    if (f->iovcnt > 0) {
        expect = iov_size(f->iov, f->iovcnt);
        ret = f->ops->writev_buffer(f->opaque, f->iov, f->iovcnt, f->pos);
    }

    if (ret >= 0) {
        f->pos += ret;
    }
    /* We expect the QEMUFile write impl to send the full
     * data set we requested, so sanity check that.
     */
    if (ret != expect) {
        qemu_file_set_error(f, ret < 0 ? ret : -EIO);
    }
    f->buf_index = 0;
    f->iovcnt = 0;
}
Example #9
File: filter.c  Project: MaddTheSane/qemu
ssize_t qemu_netfilter_pass_to_next(NetClientState *sender,
                                    unsigned flags,
                                    const struct iovec *iov,
                                    int iovcnt,
                                    void *opaque)
{
    int ret = 0;
    int direction;
    NetFilterState *nf = opaque;
    NetFilterState *next = NULL;

    if (!sender || !sender->peer) {
        /* No receiver, or the sender has been deleted; no need to pass it further. */
        goto out;
    }

    if (nf->direction == NET_FILTER_DIRECTION_ALL) {
        if (sender == nf->netdev) {
            /* This packet is sent by netdev itself */
            direction = NET_FILTER_DIRECTION_TX;
        } else {
            direction = NET_FILTER_DIRECTION_RX;
        }
    } else {
        direction = nf->direction;
    }

    next = netfilter_next(nf, direction);
    while (next) {
        /*
         * If qemu_netfilter_pass_to_next() has been called, the packet has
         * been held by a filter and its size has already been returned to
         * the sender, so sent_cb shouldn't be called later; just pass NULL
         * to the next filter.
         */
        ret = qemu_netfilter_receive(next, direction, sender, flags, iov,
                                     iovcnt, NULL);
        if (ret) {
            return ret;
        }
        next = netfilter_next(next, direction);
    }

    /*
     * We have gone through all filters; pass it to the receiver.
     * Do the validity check again in case the sender or receiver was
     * deleted while we went through the filters.
     */
    if (sender && sender->peer) {
        qemu_net_queue_send_iov(sender->peer->incoming_queue,
                                sender, flags, iov, iovcnt, NULL);
    }

out:
    /* No receiver, or the sender has been deleted. */
    return iov_size(iov, iovcnt);
}
Example #10
ssize_t sim_sock_sendmsg (struct SimSocket *socket, const struct msghdr *msg, int flags)
{
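  /* Hand sock_sendmsg() a kernel-side copy of the user iovec together with the total byte count. */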
  struct socket *kernel_socket = (struct socket *)socket;
  struct iovec *kernel_iov = copy_iovec (msg->msg_iov, msg->msg_iovlen);
  struct msghdr kernel_msg = *msg;
  kernel_msg.msg_flags = flags;
  kernel_msg.msg_iov = kernel_iov;
  int retval = sock_sendmsg (kernel_socket, &kernel_msg, iov_size (msg));
  sim_free (kernel_iov);
  return retval;
}
Example #11
static ssize_t socket_writev_buffer(void *opaque, struct iovec *iov, int iovcnt,
                                    int64_t pos)
{
    QEMUFileSocket *s = opaque;
    ssize_t len;
    ssize_t size = iov_size(iov, iovcnt);

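    /* Try to send the whole vector in one call; a short write is reported as a socket error. */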
    len = iov_send(s->fd, iov, iovcnt, 0, size);
    if (len < size) {
        len = -socket_error();
    }
    return len;
}
Example #12
ssize_t sim_sock_recvmsg (struct SimSocket *socket, struct msghdr *msg, int flags)
{
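  /* Receive into a kernel-side copy of the iovec, then restore the caller's iovec and control-buffer fields. */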
  struct socket *kernel_socket = (struct socket *)socket;
  struct iovec *kernel_iov = copy_iovec (msg->msg_iov, msg->msg_iovlen);
  struct iovec *user_iov = msg->msg_iov;
  struct cmsghdr *user_cmsgh = msg->msg_control;
  size_t user_cmsghlen = msg->msg_controllen;
  msg->msg_iov = kernel_iov;
  int retval = sock_recvmsg (kernel_socket, msg, iov_size (msg), flags);
  msg->msg_iov = user_iov;
  msg->msg_control = user_cmsgh;
  msg->msg_controllen = user_cmsghlen - msg->msg_controllen;
  sim_free (kernel_iov);
  return retval;
}
Example #13
int qio_channel_readv_all_eof(QIOChannel *ioc,
                              const struct iovec *iov,
                              size_t niov,
                              Error **errp)
{
    int ret = -1;
    struct iovec *local_iov = g_new(struct iovec, niov);
    struct iovec *local_iov_head = local_iov;
    unsigned int nlocal_iov = niov;
    bool partial = false;

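    /* Work on a local copy of the vector so iov_discard_front() can consume it as bytes are read. */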
    nlocal_iov = iov_copy(local_iov, nlocal_iov,
                          iov, niov,
                          0, iov_size(iov, niov));

    while (nlocal_iov > 0) {
        ssize_t len;
        len = qio_channel_readv(ioc, local_iov, nlocal_iov, errp);
        if (len == QIO_CHANNEL_ERR_BLOCK) {
            if (qemu_in_coroutine()) {
                qio_channel_yield(ioc, G_IO_IN);
            } else {
                qio_channel_wait(ioc, G_IO_IN);
            }
            continue;
        } else if (len < 0) {
            goto cleanup;
        } else if (len == 0) {
            if (partial) {
                error_setg(errp,
                           "Unexpected end-of-file before all bytes were read");
            } else {
                ret = 0;
            }
            goto cleanup;
        }

        partial = true;
        iov_discard_front(&local_iov, &nlocal_iov, len);
    }

    ret = 1;

 cleanup:
    g_free(local_iov_head);
    return ret;
}
Example #14
void coroutine_fn fsdev_co_throttle_request(FsThrottle *fst, bool is_write,
                                            struct iovec *iov, int iovcnt)
{
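    /* If throttling is enabled: wait our turn, account the I/O size, then kick the next queued request when the bucket allows. */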
    if (throttle_enabled(&fst->cfg)) {
        if (throttle_schedule_timer(&fst->ts, &fst->tt, is_write) ||
            !qemu_co_queue_empty(&fst->throttled_reqs[is_write])) {
            qemu_co_queue_wait(&fst->throttled_reqs[is_write], NULL);
        }

        throttle_account(&fst->ts, is_write, iov_size(iov, iovcnt));

        if (!qemu_co_queue_empty(&fst->throttled_reqs[is_write]) &&
            !throttle_schedule_timer(&fst->ts, &fst->tt, is_write)) {
            qemu_co_queue_next(&fst->throttled_reqs[is_write]);
        }
    }
}
Example #15
static ssize_t socket_writev_buffer(void *opaque, struct iovec *iov, int iovcnt,
                                    int64_t pos)
{
    QEMUFileSocket *s = opaque;
    ssize_t len;
    ssize_t size = iov_size(iov, iovcnt);
    ssize_t offset = 0;
    int     err;

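    /* Keep calling iov_send() until the entire vector is written, using g_poll() to emulate a blocking socket. */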
    while (size > 0) {
        len = iov_send(s->fd, iov, iovcnt, offset, size);

        if (len > 0) {
            size -= len;
            offset += len;
        }

        if (size > 0) {
            err = socket_error();

            if (err != EAGAIN && err != EWOULDBLOCK) {
                error_report("socket_writev_buffer: Got err=%d for (%zu/%zu)",
                             err, (size_t)size, (size_t)len);
                /*
                 * If I've already sent some but only just got the error, I
                 * could return the amount validly sent so far and wait for the
                 * next call to report the error, but I'd rather flag the error
                 * immediately.
                 */
                return -err;
            }

            /* Emulate blocking */
            GPollFD pfd;

            pfd.fd = s->fd;
            pfd.events = G_IO_OUT | G_IO_ERR;
            pfd.revents = 0;
            TFR(err = g_poll(&pfd, 1, -1 /* no timeout */));
            /* Errors other than EINTR intentionally ignored */
        }
     }

    return offset;
}
Example #16
static ssize_t filter_redirector_receive_iov(NetFilterState *nf,
                                             NetClientState *sender,
                                             unsigned flags,
                                             const struct iovec *iov,
                                             int iovcnt,
                                             NetPacketSent *sent_cb)
{
    MirrorState *s = FILTER_REDIRECTOR(nf);
    int ret;

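    /* When an output chardev is connected, forward the packet and claim the full size so the sender treats it as delivered. */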
    if (qemu_chr_fe_get_driver(&s->chr_out)) {
        ret = filter_send(&s->chr_out, iov, iovcnt);
        if (ret) {
            error_report("filter redirector send failed(%s)", strerror(-ret));
        }
        return iov_size(iov, iovcnt);
    } else {
        return 0;
    }
}
Example #17
static ssize_t unix_writev_buffer(void *opaque, struct iovec *iov, int iovcnt,
                                  int64_t pos)
{
    QEMUFileSocket *s = opaque;
    ssize_t len, offset;
    ssize_t size = iov_size(iov, iovcnt);
    ssize_t total = 0;

    assert(iovcnt > 0);
    offset = 0;
    while (size > 0) {
        /* Find the next start position; skip all full-sized vector elements  */
        while (offset >= iov[0].iov_len) {
            offset -= iov[0].iov_len;
            iov++, iovcnt--;
        }

        /* skip `offset' bytes from the (now) first element, undo it on exit */
        assert(iovcnt > 0);
        iov[0].iov_base += offset;
        iov[0].iov_len -= offset;

        do {
            len = writev(s->fd, iov, iovcnt);
        } while (len == -1 && errno == EINTR);
        if (len == -1) {
            return -errno;
        }

        /* Undo the changes above */
        iov[0].iov_base -= offset;
        iov[0].iov_len += offset;

        /* Prepare for the next iteration */
        offset += len;
        total += len;
        size -= len;
    }

    return total;
}
Example #18
File: nbd-client.c  Project: 8tab/qemu
static void nbd_co_receive_reply(NBDClientSession *s,
                                 NBDRequest *request,
                                 NBDReply *reply,
                                 QEMUIOVector *qiov)
{
    int i = HANDLE_TO_INDEX(s, request->handle);

    /* Wait until we're woken up by nbd_read_reply_entry.  */
    s->requests[i].receiving = true;
    qemu_coroutine_yield();
    s->requests[i].receiving = false;
    *reply = s->reply;
    if (reply->handle != request->handle || !s->ioc || s->quit) {
        reply->error = EIO;
    } else {
        if (qiov && reply->error == 0) {
            assert(request->len == iov_size(qiov->iov, qiov->niov));
            if (qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
                                      NULL) < 0) {
                reply->error = EIO;
                s->quit = true;
            }
        }

        /* Tell the read handler to read another header.  */
        s->reply.handle = 0;
    }

    s->requests[i].coroutine = NULL;

    /* Kick the read_reply_co to get the next reply.  */
    if (s->read_reply_co) {
        aio_co_wake(s->read_reply_co);
    }

    qemu_co_mutex_lock(&s->send_mutex);
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_co_mutex_unlock(&s->send_mutex);
}
Example #19
File: lz4.c  Project: raedwulf/dillsocks
static ssize_t lz4_mrecvv(struct msock_vfs *mvfs,
      const struct iovec *iov, size_t iovlen, int64_t deadline) {
    struct lz4_sock *obj = dsock_cont(mvfs, struct lz4_sock, mvfs);
    /* Adjust the buffer size as needed. */
    size_t len = iov_size(iov, iovlen);
    size_t maxlen = LZ4F_compressFrameBound(len, NULL);
    if(obj->inlen < maxlen) {
        uint8_t *newbuf = realloc(obj->inbuf, maxlen);
        if(dsock_slow(!newbuf)) {errno = ENOMEM; return -1;}
        obj->inbuf = newbuf;
        obj->inlen = maxlen;
    }
    /* Get the compressed message. */
    ssize_t sz = mrecv(obj->s, obj->inbuf, obj->inlen, deadline);
    if(dsock_slow(sz < 0)) return -1;
    /* Extract size of the uncompressed message from LZ4 frame header. */
    LZ4F_frameInfo_t info;
    size_t infolen = sz;
    size_t ec = LZ4F_getFrameInfo(obj->dctx, &info, obj->inbuf, &infolen);
    if(dsock_slow(LZ4F_isError(ec))) {errno = EPROTO; return -1;}
    /* Size is a required field. */
    if(dsock_slow(info.contentSize == 0)) {errno = EPROTO; return -1;}
    /* Decompressed message would exceed the buffer size. */
    if(dsock_slow(info.contentSize > len)) {errno = EMSGSIZE; return -1;}
    /* Decompress. */
    /* TODO: Avoid the extra allocations and copies. */
    uint8_t *buf = malloc(len);
    if(dsock_slow(!buf)) {errno = ENOMEM; return -1;}
    size_t dstlen = len;
    size_t srclen = sz - infolen;
    ec = LZ4F_decompress(obj->dctx, buf, &dstlen,
        obj->inbuf + infolen, &srclen, NULL);
    if(dsock_slow(LZ4F_isError(ec))) {errno = EPROTO; return -1;}
    if(dsock_slow(ec != 0)) {errno = EPROTO; return -1;}
    dsock_assert(srclen == sz - infolen);
    iov_copyallto(iov, iovlen, buf);
    free(buf);
    return dstlen;
}
Example #20
int qio_channel_writev_all(QIOChannel *ioc,
                           const struct iovec *iov,
                           size_t niov,
                           Error **errp)
{
    int ret = -1;
    struct iovec *local_iov = g_new(struct iovec, niov);
    struct iovec *local_iov_head = local_iov;
    unsigned int nlocal_iov = niov;

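    /* Work on a local copy of the vector so iov_discard_front() can advance past bytes already written. */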
    nlocal_iov = iov_copy(local_iov, nlocal_iov,
                          iov, niov,
                          0, iov_size(iov, niov));

    while (nlocal_iov > 0) {
        ssize_t len;
        len = qio_channel_writev(ioc, local_iov, nlocal_iov, errp);
        if (len == QIO_CHANNEL_ERR_BLOCK) {
            if (qemu_in_coroutine()) {
                qio_channel_yield(ioc, G_IO_OUT);
            } else {
                qio_channel_wait(ioc, G_IO_OUT);
            }
            continue;
        }
        if (len < 0) {
            goto cleanup;
        }

        iov_discard_front(&local_iov, &nlocal_iov, len);
    }

    ret = 0;
 cleanup:
    g_free(local_iov_head);
    return ret;
}
Example #21
static void test_to_from_buf_1(void)
{
     unsigned niov;
     struct iovec *iov;
     size_t sz;
     unsigned char *ibuf, *obuf;
     unsigned i, j, n;

     iov_random(&iov, &niov);

     sz = iov_size(iov, niov);

     ibuf = g_malloc(sz + 8) + 4;
     memcpy(ibuf-4, "aaaa", 4); memcpy(ibuf + sz, "bbbb", 4);
     obuf = g_malloc(sz + 8) + 4;
     memcpy(obuf-4, "xxxx", 4); memcpy(obuf + sz, "yyyy", 4);

     /* fill in ibuf with 0123456... */
     for (i = 0; i < sz; ++i) {
         ibuf[i] = i & 255;
     }

     for (i = 0; i <= sz; ++i) {

         /* Test from/to buf for offset(i) in [0..sz] up to the end of buffer.
          * For last iteration with offset == sz, the procedure should
          * skip whole vector and process exactly 0 bytes */

         /* first set bytes [i..sz) to some "random" value */
         n = iov_memset(iov, niov, 0, 0xff, -1);
         g_assert(n == sz);

         /* next copy bytes [i..sz) from ibuf to iovec */
         n = iov_from_buf(iov, niov, i, ibuf + i, -1);
         g_assert(n == sz - i);

         /* clear part of obuf */
         memset(obuf + i, 0, sz - i);
         /* and set this part of obuf to values from iovec */
         n = iov_to_buf(iov, niov, i, obuf + i, -1);
         g_assert(n == sz - i);

         /* now compare resulting buffers */
         g_assert(memcmp(ibuf, obuf, sz) == 0);

         /* test just one char */
         n = iov_to_buf(iov, niov, i, obuf + i, 1);
         g_assert(n == (i < sz));
         if (n) {
             g_assert(obuf[i] == (i & 255));
         }

         for (j = i; j <= sz; ++j) {
             /* now test num of bytes cap up to byte no. j,
              * with j in [i..sz]. */

             /* clear iovec */
             n = iov_memset(iov, niov, 0, 0xff, -1);
             g_assert(n == sz);

             /* copy bytes [i..j) from ibuf to iovec */
             n = iov_from_buf(iov, niov, i, ibuf + i, j - i);
             g_assert(n == j - i);

             /* clear part of obuf */
             memset(obuf + i, 0, j - i);

             /* copy bytes [i..j) from iovec to obuf */
             n = iov_to_buf(iov, niov, i, obuf + i, j - i);
             g_assert(n == j - i);

             /* verify result */
             g_assert(memcmp(ibuf, obuf, sz) == 0);

             /* now actually check if the iovec contains the right data */
             test_iov_bytes(iov, niov, i, j - i);
         }
    }
    g_assert(!memcmp(ibuf-4, "aaaa", 4) && !memcmp(ibuf+sz, "bbbb", 4));
    g_free(ibuf-4);
    g_assert(!memcmp(obuf-4, "xxxx", 4) && !memcmp(obuf+sz, "yyyy", 4));
    g_free(obuf-4);
    iov_free(iov, niov);
}
Example #22
static coroutine void nagle_sender(int s, size_t batch, int64_t interval,
      uint8_t *buf, int sendch, int ackch) {
    /* Amount of data in the buffer. */
    size_t len = 0;
    /* Last time at least one byte was sent. */
    int64_t last = now();
    while(1) {
        /* Get data to send from the user coroutine. */
        struct nagle_vec vec;
        int rc = chrecv(sendch, &vec, sizeof(vec),
            interval >= 0 && len ? last + interval : -1);
        if(dsock_slow(rc < 0 && errno == ECANCELED)) return;
        /* Timeout expired. Flush the data in the buffer. */
        if(dsock_slow(rc < 0 && errno == ETIMEDOUT)) {
            rc = bsend(s, buf, len, -1);
            if(dsock_slow(rc < 0 && errno == ECANCELED)) return;
            dsock_assert(rc == 0);
            len = 0;
            last = now();
            continue;
        }
        dsock_assert(rc == 0);
        /* If the data fits into the buffer, store it there. */
        size_t bytes = iov_size(vec.iov, vec.iovlen);
        if(len + bytes < batch) {
            iov_copyallfrom(buf + len, vec.iov, vec.iovlen);
            len += bytes;
            int err = 0;
            rc = chsend(ackch, &err, sizeof(err), -1);
            if(dsock_slow(rc < 0 && errno == ECANCELED)) return;
            dsock_assert(rc == 0);
            continue;
        }
        if(len > 0) {
            /* Flush the buffer. */
            rc = bsend(s, buf, len, -1);
            if(dsock_slow(rc < 0 && errno == ECANCELED)) return;
            /* Pass the error to the user. */
            if(dsock_slow(rc < 0)) {
                int err = errno;
                rc = chsend(ackch, &err, sizeof(err), -1);
                if(dsock_slow(rc < 0 && errno == ECANCELED)) return;
                dsock_assert(rc == 0);
                return;
            }
            len = 0;
            last = now();
        }
        /* Once again: if the data fits into the buffer, store it there. */
        if(bytes < batch) {
            iov_copyallfrom(buf, vec.iov, vec.iovlen);
            len = bytes;
            int err = 0;
            rc = chsend(ackch, &err, sizeof(err), -1);
            if(dsock_slow(rc < 0 && errno == ECANCELED)) return;
            dsock_assert(rc == 0);
            continue;
        }
        /* This is a big chunk of data, no need to Nagle it.
           We'll send it straight away. */
        rc = bsendv(s, vec.iov, vec.iovlen, -1);
        if(dsock_slow(rc < 0 && errno == ECANCELED)) return;
        dsock_assert(rc == 0);
        last = now();
        int err = 0;
        rc = chsend(ackch, &err, sizeof(err), -1);
        if(dsock_slow(rc < 0 && errno == ECANCELED)) return;
        dsock_assert(rc == 0);
    }
}
Example #23
static void test_io(void)
{
#ifndef _WIN32
/* Uses socketpair(PF_UNIX), which does not exist on Windows. */

    int sv[2];
    int r;
    unsigned i, j, k, s, t;
    fd_set fds;
    unsigned niov;
    struct iovec *iov, *siov;
    unsigned char *buf;
    size_t sz;

    iov_random(&iov, &niov);
    sz = iov_size(iov, niov);
    buf = g_malloc(sz);
    for (i = 0; i < sz; ++i) {
        buf[i] = i & 255;
    }
    iov_from_buf(iov, niov, 0, buf, sz);

    siov = g_malloc(sizeof(*iov) * niov);
    memcpy(siov, iov, sizeof(*iov) * niov);

    if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) < 0) {
       perror("socketpair");
       exit(1);
    }

    FD_ZERO(&fds);

    t = 0;
    if (fork() == 0) {
       /* writer */

       close(sv[0]);
       FD_SET(sv[1], &fds);
       fcntl(sv[1], F_SETFL, O_RDWR|O_NONBLOCK);
       r = g_test_rand_int_range(sz / 2, sz);
       setsockopt(sv[1], SOL_SOCKET, SO_SNDBUF, &r, sizeof(r));

       for (i = 0; i <= sz; ++i) {
           for (j = i; j <= sz; ++j) {
               k = i;
               do {
                   s = g_test_rand_int_range(0, j - k + 1);
                   r = iov_send(sv[1], iov, niov, k, s);
                   g_assert(memcmp(iov, siov, sizeof(*iov)*niov) == 0);
                   if (r >= 0) {
                       k += r;
                       t += r;
                       usleep(g_test_rand_int_range(0, 30));
                   } else if (errno == EAGAIN) {
                       select(sv[1]+1, NULL, &fds, NULL, NULL);
                       continue;
                   } else {
                       perror("send");
                       exit(1);
                   }
               } while(k < j);
           }
       }
       exit(0);

    } else {
       /* reader & verifier */

       close(sv[1]);
       FD_SET(sv[0], &fds);
       fcntl(sv[0], F_SETFL, O_RDWR|O_NONBLOCK);
       r = g_test_rand_int_range(sz / 2, sz);
       setsockopt(sv[0], SOL_SOCKET, SO_RCVBUF, &r, sizeof(r));
       usleep(500000);

       for (i = 0; i <= sz; ++i) {
           for (j = i; j <= sz; ++j) {
               k = i;
               iov_memset(iov, niov, 0, 0xff, -1);
               do {
                   s = g_test_rand_int_range(0, j - k + 1);
                   r = iov_recv(sv[0], iov, niov, k, s);
                   g_assert(memcmp(iov, siov, sizeof(*iov)*niov) == 0);
                   if (r > 0) {
                       k += r;
                       t += r;
                   } else if (!r) {
                       if (s) {
                           break;
                       }
                   } else if (errno == EAGAIN) {
                       select(sv[0]+1, &fds, NULL, NULL, NULL);
                       continue;
                   } else {
                       perror("recv");
                       exit(1);
                   }
               } while(k < j);
               test_iov_bytes(iov, niov, i, j - i);
           }
        }
     }
#endif
}
Example #24
File: socket.c  Project: 01org/KVMGT-qemu
/* This body matches QEMU's net_socket_receive() (net/socket.c); the signature is assumed. */
static ssize_t net_socket_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    NetSocketState *s = DO_UPCAST(NetSocketState, nc, nc);
    uint32_t len = htonl(size);
    struct iovec iov[] = {
        {
            .iov_base = &len,
            .iov_len  = sizeof(len),
        }, {
            .iov_base = (void *)buf,
            .iov_len  = size,
        },
    };
    size_t remaining;
    ssize_t ret;

    remaining = iov_size(iov, 2) - s->send_index;
    ret = iov_send(s->fd, iov, 2, s->send_index, remaining);

    if (ret == -1 && errno == EAGAIN) {
        ret = 0; /* handled further down */
    }
    if (ret == -1) {
        s->send_index = 0;
        return -errno;
    }
    if (ret < (ssize_t)remaining) {
        s->send_index += ret;
        net_socket_write_poll(s, true);
        return 0;
    }
    s->send_index = 0;

    return size;
}
Example #25
static void test_discard_front(void)
{
    struct iovec *iov;
    struct iovec *iov_tmp;
    unsigned int iov_cnt;
    unsigned int iov_cnt_tmp;
    void *old_base;
    size_t size;
    size_t ret;

    /* Discard zero bytes */
    iov_random(&iov, &iov_cnt);
    iov_tmp = iov;
    iov_cnt_tmp = iov_cnt;
    ret = iov_discard_front(&iov_tmp, &iov_cnt_tmp, 0);
    g_assert(ret == 0);
    g_assert(iov_tmp == iov);
    g_assert(iov_cnt_tmp == iov_cnt);
    iov_free(iov, iov_cnt);

    /* Discard more bytes than vector size */
    iov_random(&iov, &iov_cnt);
    iov_tmp = iov;
    iov_cnt_tmp = iov_cnt;
    size = iov_size(iov, iov_cnt);
    ret = iov_discard_front(&iov_tmp, &iov_cnt_tmp, size + 1);
    g_assert(ret == size);
    g_assert(iov_cnt_tmp == 0);
    iov_free(iov, iov_cnt);

    /* Discard entire vector */
    iov_random(&iov, &iov_cnt);
    iov_tmp = iov;
    iov_cnt_tmp = iov_cnt;
    size = iov_size(iov, iov_cnt);
    ret = iov_discard_front(&iov_tmp, &iov_cnt_tmp, size);
    g_assert(ret == size);
    g_assert(iov_cnt_tmp == 0);
    iov_free(iov, iov_cnt);

    /* Discard within first element */
    iov_random(&iov, &iov_cnt);
    iov_tmp = iov;
    iov_cnt_tmp = iov_cnt;
    old_base = iov->iov_base;
    size = g_test_rand_int_range(1, iov->iov_len);
    ret = iov_discard_front(&iov_tmp, &iov_cnt_tmp, size);
    g_assert(ret == size);
    g_assert(iov_tmp == iov);
    g_assert(iov_cnt_tmp == iov_cnt);
    g_assert(iov_tmp->iov_base == old_base + size);
    iov_tmp->iov_base = old_base; /* undo before g_free() */
    iov_free(iov, iov_cnt);

    /* Discard entire first element */
    iov_random(&iov, &iov_cnt);
    iov_tmp = iov;
    iov_cnt_tmp = iov_cnt;
    ret = iov_discard_front(&iov_tmp, &iov_cnt_tmp, iov->iov_len);
    g_assert(ret == iov->iov_len);
    g_assert(iov_tmp == iov + 1);
    g_assert(iov_cnt_tmp == iov_cnt - 1);
    iov_free(iov, iov_cnt);

    /* Discard within second element */
    iov_random(&iov, &iov_cnt);
    iov_tmp = iov;
    iov_cnt_tmp = iov_cnt;
    old_base = iov[1].iov_base;
    size = iov->iov_len + g_test_rand_int_range(1, iov[1].iov_len);
    ret = iov_discard_front(&iov_tmp, &iov_cnt_tmp, size);
    g_assert(ret == size);
    g_assert(iov_tmp == iov + 1);
    g_assert(iov_cnt_tmp == iov_cnt - 1);
    g_assert(iov_tmp->iov_base == old_base + (size - iov->iov_len));
    iov_tmp->iov_base = old_base; /* undo before g_free() */
    iov_free(iov, iov_cnt);
}
Example #26
File: nbd-client.c  Project: 8tab/qemu
static int nbd_co_send_request(BlockDriverState *bs,
                               NBDRequest *request,
                               QEMUIOVector *qiov)
{
    NBDClientSession *s = nbd_get_client_session(bs);
    int rc, i;

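    /* Throttle to MAX_NBD_REQUESTS in-flight requests, then claim a free request slot. */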
    qemu_co_mutex_lock(&s->send_mutex);
    while (s->in_flight == MAX_NBD_REQUESTS) {
        qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
    }
    s->in_flight++;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->requests[i].coroutine == NULL) {
            break;
        }
    }

    g_assert(qemu_in_coroutine());
    assert(i < MAX_NBD_REQUESTS);

    s->requests[i].coroutine = qemu_coroutine_self();
    s->requests[i].receiving = false;

    request->handle = INDEX_TO_HANDLE(s, i);

    if (s->quit) {
        rc = -EIO;
        goto err;
    }
    if (!s->ioc) {
        rc = -EPIPE;
        goto err;
    }

    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (rc >= 0 && !s->quit) {
            assert(request->len == iov_size(qiov->iov, qiov->niov));
            if (qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
                                       NULL) < 0) {
                rc = -EIO;
            }
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }

err:
    if (rc < 0) {
        s->quit = true;
        s->requests[i].coroutine = NULL;
        s->in_flight--;
        qemu_co_queue_next(&s->free_sema);
    }
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}
Example #27
File: virtio-crypto.c  Project: 8tab/qemu
static int
virtio_crypto_handle_request(VirtIOCryptoReq *request)
{
    VirtIOCrypto *vcrypto = request->vcrypto;
    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
    VirtQueueElement *elem = &request->elem;
    int queue_index = virtio_crypto_vq2q(virtio_get_queue_index(request->vq));
    struct virtio_crypto_op_data_req req;
    int ret;
    struct iovec *in_iov;
    struct iovec *out_iov;
    unsigned in_num;
    unsigned out_num;
    uint32_t opcode;
    uint8_t status = VIRTIO_CRYPTO_ERR;
    uint64_t session_id;
    CryptoDevBackendSymOpInfo *sym_op_info = NULL;
    Error *local_err = NULL;

    if (elem->out_num < 1 || elem->in_num < 1) {
        virtio_error(vdev, "virtio-crypto dataq missing headers");
        return -1;
    }

    out_num = elem->out_num;
    out_iov = elem->out_sg;
    in_num = elem->in_num;
    in_iov = elem->in_sg;
    if (unlikely(iov_to_buf(out_iov, out_num, 0, &req, sizeof(req))
                != sizeof(req))) {
        virtio_error(vdev, "virtio-crypto request outhdr too short");
        return -1;
    }
    iov_discard_front(&out_iov, &out_num, sizeof(req));

    if (in_iov[in_num - 1].iov_len <
            sizeof(struct virtio_crypto_inhdr)) {
        virtio_error(vdev, "virtio-crypto request inhdr too short");
        return -1;
    }
    /* We always touch the last byte, so just see how big in_iov is. */
    request->in_len = iov_size(in_iov, in_num);
    request->in = (void *)in_iov[in_num - 1].iov_base
              + in_iov[in_num - 1].iov_len
              - sizeof(struct virtio_crypto_inhdr);
    iov_discard_back(in_iov, &in_num, sizeof(struct virtio_crypto_inhdr));

    /*
     * The length of the operation result, including dest_data
     * and digest_result if present.
     */
    request->in_num = in_num;
    request->in_iov = in_iov;

    opcode = ldl_le_p(&req.header.opcode);
    session_id = ldq_le_p(&req.header.session_id);

    switch (opcode) {
    case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
    case VIRTIO_CRYPTO_CIPHER_DECRYPT:
        ret = virtio_crypto_handle_sym_req(vcrypto,
                         &req.u.sym_req,
                         &sym_op_info,
                         out_iov, out_num);
        /* Serious errors, need to reset virtio crypto device */
        if (ret == -EFAULT) {
            return -1;
        } else if (ret == -VIRTIO_CRYPTO_NOTSUPP) {
            virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
            virtio_crypto_free_request(request);
        } else {
            sym_op_info->session_id = session_id;

            /* Set request's parameter */
            request->flags = CRYPTODEV_BACKEND_ALG_SYM;
            request->u.sym_op_info = sym_op_info;
            ret = cryptodev_backend_crypto_operation(vcrypto->cryptodev,
                                    request, queue_index, &local_err);
            if (ret < 0) {
                status = -ret;
                if (local_err) {
                    error_report_err(local_err);
                }
            } else { /* ret == VIRTIO_CRYPTO_OK */
                status = ret;
            }
            virtio_crypto_req_complete(request, status);
            virtio_crypto_free_request(request);
        }
        break;
    case VIRTIO_CRYPTO_HASH:
    case VIRTIO_CRYPTO_MAC:
    case VIRTIO_CRYPTO_AEAD_ENCRYPT:
    case VIRTIO_CRYPTO_AEAD_DECRYPT:
    default:
        error_report("virtio-crypto unsupported dataq opcode: %u",
                     opcode);
        virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
        virtio_crypto_free_request(request);
    }

    return 0;
}
Example #28
static void test_discard_back(void)
{
    struct iovec *iov;
    unsigned int iov_cnt;
    unsigned int iov_cnt_tmp;
    void *old_base;
    size_t size;
    size_t ret;

    /* Discard zero bytes */
    iov_random(&iov, &iov_cnt);
    iov_cnt_tmp = iov_cnt;
    ret = iov_discard_back(iov, &iov_cnt_tmp, 0);
    g_assert(ret == 0);
    g_assert(iov_cnt_tmp == iov_cnt);
    iov_free(iov, iov_cnt);

    /* Discard more bytes than vector size */
    iov_random(&iov, &iov_cnt);
    iov_cnt_tmp = iov_cnt;
    size = iov_size(iov, iov_cnt);
    ret = iov_discard_back(iov, &iov_cnt_tmp, size + 1);
    g_assert(ret == size);
    g_assert(iov_cnt_tmp == 0);
    iov_free(iov, iov_cnt);

    /* Discard entire vector */
    iov_random(&iov, &iov_cnt);
    iov_cnt_tmp = iov_cnt;
    size = iov_size(iov, iov_cnt);
    ret = iov_discard_back(iov, &iov_cnt_tmp, size);
    g_assert(ret == size);
    g_assert(iov_cnt_tmp == 0);
    iov_free(iov, iov_cnt);

    /* Discard within last element */
    iov_random(&iov, &iov_cnt);
    iov_cnt_tmp = iov_cnt;
    old_base = iov[iov_cnt - 1].iov_base;
    size = g_test_rand_int_range(1, iov[iov_cnt - 1].iov_len);
    ret = iov_discard_back(iov, &iov_cnt_tmp, size);
    g_assert(ret == size);
    g_assert(iov_cnt_tmp == iov_cnt);
    g_assert(iov[iov_cnt - 1].iov_base == old_base);
    iov_free(iov, iov_cnt);

    /* Discard entire last element */
    iov_random(&iov, &iov_cnt);
    iov_cnt_tmp = iov_cnt;
    old_base = iov[iov_cnt - 1].iov_base;
    size = iov[iov_cnt - 1].iov_len;
    ret = iov_discard_back(iov, &iov_cnt_tmp, size);
    g_assert(ret == size);
    g_assert(iov_cnt_tmp == iov_cnt - 1);
    iov_free(iov, iov_cnt);

    /* Discard within second-to-last element */
    iov_random(&iov, &iov_cnt);
    iov_cnt_tmp = iov_cnt;
    old_base = iov[iov_cnt - 2].iov_base;
    size = iov[iov_cnt - 1].iov_len +
           g_test_rand_int_range(1, iov[iov_cnt - 2].iov_len);
    ret = iov_discard_back(iov, &iov_cnt_tmp, size);
    g_assert(ret == size);
    g_assert(iov_cnt_tmp == iov_cnt - 1);
    g_assert(iov[iov_cnt - 2].iov_base == old_base);
    iov_free(iov, iov_cnt);
}
Example #29
File: netmap.c  Project: 8tab/qemu
static ssize_t netmap_receive_iov(NetClientState *nc,
                    const struct iovec *iov, int iovcnt)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
    struct netmap_ring *ring = s->tx;
    uint32_t last;
    uint32_t idx;
    uint8_t *dst;
    int j;
    uint32_t i;

    if (unlikely(!ring)) {
        /* Drop the packet. */
        return iov_size(iov, iovcnt);
    }

    last = i = ring->cur;

    if (nm_ring_space(ring) < iovcnt) {
        /* Not enough netmap slots. */
        netmap_write_poll(s, true);
        return 0;
    }

    for (j = 0; j < iovcnt; j++) {
        int iov_frag_size = iov[j].iov_len;
        int offset = 0;
        int nm_frag_size;

        /* Split each iovec fragment over multiple netmap slots, if
           necessary. */
        while (iov_frag_size) {
            nm_frag_size = MIN(iov_frag_size, ring->nr_buf_size);

            if (unlikely(nm_ring_empty(ring))) {
                /* We ran out of netmap slots while splitting the
                   iovec fragments. */
                netmap_write_poll(s, true);
                return 0;
            }

            idx = ring->slot[i].buf_idx;
            dst = (uint8_t *)NETMAP_BUF(ring, idx);

            ring->slot[i].len = nm_frag_size;
            ring->slot[i].flags = NS_MOREFRAG;
            pkt_copy(iov[j].iov_base + offset, dst, nm_frag_size);

            last = i;
            i = nm_ring_next(ring, i);

            offset += nm_frag_size;
            iov_frag_size -= nm_frag_size;
        }
    }
    /* The last slot must not have NS_MOREFRAG set. */
    ring->slot[last].flags &= ~NS_MOREFRAG;

    /* Now update ring->cur and ring->head. */
    ring->cur = ring->head = i;

    ioctl(s->nmd->fd, NIOCTXSYNC, NULL);

    return iov_size(iov, iovcnt);
}