static int64_t ivshmem_recv_msg(IVShmemState *s, int *pfd, Error **errp)
{
    int64_t msg;
    int n, ret;

    n = 0;
    do {
        ret = qemu_chr_fe_read_all(s->server_chr, (uint8_t *)&msg + n,
                                   sizeof(msg) - n);
        if (ret < 0) {
            if (ret == -EINTR) {
                /* interrupted read: retry without touching the byte count */
                continue;
            }
            error_setg_errno(errp, -ret, "read from server failed");
            return INT64_MIN;
        }
        n += ret;
    } while (n < sizeof(msg));

    *pfd = qemu_chr_fe_get_msgfd(s->server_chr);
    return msg;
}
static void ivshmem_check_version(void *opaque, const uint8_t * buf, int size)
{
    IVShmemState *s = opaque;
    int tmp;
    int64_t version;

    if (!fifo_update_and_get_i64(s, buf, size, &version)) {
        return;
    }

    /* the version message must not carry a file descriptor */
    tmp = qemu_chr_fe_get_msgfd(s->server_chr);
    if (tmp != -1 || version != IVSHMEM_PROTOCOL_VERSION) {
        fprintf(stderr, "incompatible version, you are connecting to an "
                "ivshmem-server using a different protocol, please check "
                "your setup\n");
        qemu_chr_delete(s->server_chr);
        s->server_chr = NULL;
        return;
    }

    IVSHMEM_DPRINTF("version check ok, switch to real chardev handler\n");
    qemu_chr_add_handlers(s->server_chr, ivshmem_can_receive, ivshmem_read,
                          ivshmem_event, s);
}
static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
{
    IVShmemState *s = opaque;
    Error *err = NULL;
    int fd;
    int64_t msg;

    /* accumulate bytes until a full 64-bit message has been buffered */
    assert(size >= 0 && s->msg_buffered_bytes + size <= sizeof(s->msg_buf));
    memcpy((unsigned char *)&s->msg_buf + s->msg_buffered_bytes, buf, size);
    s->msg_buffered_bytes += size;
    if (s->msg_buffered_bytes < sizeof(s->msg_buf)) {
        return;
    }
    msg = le64_to_cpu(s->msg_buf);
    s->msg_buffered_bytes = 0;

    /* a file descriptor may accompany the message */
    fd = qemu_chr_fe_get_msgfd(&s->server_chr);

    process_msg(s, msg, fd, &err);
    if (err) {
        error_report_err(err);
    }
}
static void ivshmem_read(void *opaque, const uint8_t * buf, int flags)
{
    IVShmemState *s = opaque;
    int incoming_fd, tmp_fd;
    int guest_max_eventfd;
    long incoming_posn;

    memcpy(&incoming_posn, buf, sizeof(long));
    /* pick off s->server_chr->msgfd and store it, posn should accompany msg */
    tmp_fd = qemu_chr_fe_get_msgfd(s->server_chr);
    IVSHMEM_DPRINTF("posn is %ld, fd is %d\n", incoming_posn, tmp_fd);

    /* make sure we have enough space for this guest */
    if (incoming_posn >= s->nb_peers) {
        increase_dynamic_storage(s, incoming_posn);
    }

    if (tmp_fd == -1) {
        /* if posn is positive and unseen before then this is our posn */
        if ((incoming_posn >= 0) &&
            (s->peers[incoming_posn].eventfds == NULL)) {
            /* receive our posn */
            s->vm_id = incoming_posn;
            return;
        } else {
            /* otherwise an fd == -1 means an existing guest has gone away */
            IVSHMEM_DPRINTF("posn %ld has gone away\n", incoming_posn);
            close_guest_eventfds(s, incoming_posn);
            return;
        }
    }

    /* because of the implementation of get_msgfd, we need a dup */
    incoming_fd = dup(tmp_fd);

    if (incoming_fd == -1) {
        fprintf(stderr, "could not allocate file descriptor %s\n",
                strerror(errno));
        return;
    }

    /* if the position is -1, then it's shared memory region fd */
    if (incoming_posn == -1) {
        void *map_ptr;

        s->max_peer = 0;

        if (check_shm_size(s, incoming_fd) == -1) {
            exit(-1);
        }

        /* mmap the region and map into the BAR2 */
        map_ptr = mmap(0, s->ivshmem_size, PROT_READ|PROT_WRITE, MAP_SHARED,
                       incoming_fd, 0);
        memory_region_init_ram_ptr(&s->ivshmem, "ivshmem.bar2",
                                   s->ivshmem_size, map_ptr);
        vmstate_register_ram(&s->ivshmem, &s->dev.qdev);

        IVSHMEM_DPRINTF("guest h/w addr = %" PRIu64 ", size = %" PRIu64 "\n",
                        s->ivshmem_offset, s->ivshmem_size);

        memory_region_add_subregion(&s->bar, 0, &s->ivshmem);

        /* only store the fd if it is successfully mapped */
        s->shm_fd = incoming_fd;

        return;
    }

    /* each guest has an array of eventfds, and we keep track of how many
     * guests for each VM */
    guest_max_eventfd = s->peers[incoming_posn].nb_eventfds;

    if (guest_max_eventfd == 0) {
        /* one eventfd per MSI vector */
        s->peers[incoming_posn].eventfds = g_new(EventNotifier, s->vectors);
    }

    /* this is an eventfd for a particular guest VM */
    IVSHMEM_DPRINTF("eventfds[%ld][%d] = %d\n", incoming_posn,
                    guest_max_eventfd, incoming_fd);
    event_notifier_init_fd(&s->peers[incoming_posn].eventfds[guest_max_eventfd],
                           incoming_fd);

    /* increment count for particular guest */
    s->peers[incoming_posn].nb_eventfds++;

    /* keep track of the maximum VM ID */
    if (incoming_posn > s->max_peer) {
        s->max_peer = incoming_posn;
    }

    if (incoming_posn == s->vm_id) {
        s->eventfd_chr[guest_max_eventfd] = create_eventfd_chr_device(s,
                &s->peers[s->vm_id].eventfds[guest_max_eventfd],
                guest_max_eventfd);
    }

    if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
        ivshmem_add_eventfd(s, incoming_posn, guest_max_eventfd);
    }
}
static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
{
    IVShmemState *s = opaque;
    int incoming_fd;
    int new_eventfd;
    int64_t incoming_posn;
    Error *err = NULL;
    Peer *peer;

    if (!fifo_update_and_get_i64(s, buf, size, &incoming_posn)) {
        return;
    }

    if (incoming_posn < -1) {
        IVSHMEM_DPRINTF("invalid incoming_posn %" PRId64 "\n", incoming_posn);
        return;
    }

    /* pick off s->server_chr->msgfd and store it, posn should accompany msg */
    incoming_fd = qemu_chr_fe_get_msgfd(s->server_chr);
    IVSHMEM_DPRINTF("posn is %" PRId64 ", fd is %d\n", incoming_posn,
                    incoming_fd);

    /* make sure we have enough space for this peer */
    if (incoming_posn >= s->nb_peers) {
        if (resize_peers(s, incoming_posn + 1) < 0) {
            error_report("failed to resize peers array");
            if (incoming_fd != -1) {
                close(incoming_fd);
            }
            return;
        }
    }

    peer = &s->peers[incoming_posn];

    if (incoming_fd == -1) {
        /* if posn is positive and unseen before then this is our posn */
        if (incoming_posn >= 0 && s->vm_id == -1) {
            /* receive our posn */
            s->vm_id = incoming_posn;
        } else {
            /* otherwise an fd == -1 means an existing peer has gone away */
            IVSHMEM_DPRINTF("posn %" PRId64 " has gone away\n", incoming_posn);
            close_peer_eventfds(s, incoming_posn);
        }
        return;
    }

    /* if the position is -1, then it's shared memory region fd */
    if (incoming_posn == -1) {
        void *map_ptr;

        if (memory_region_is_mapped(&s->ivshmem)) {
            error_report("shm already initialized");
            close(incoming_fd);
            return;
        }

        if (check_shm_size(s, incoming_fd, &err) == -1) {
            error_report_err(err);
            close(incoming_fd);
            return;
        }

        /* mmap the region and map into the BAR2 */
        map_ptr = mmap(0, s->ivshmem_size, PROT_READ|PROT_WRITE, MAP_SHARED,
                       incoming_fd, 0);
        if (map_ptr == MAP_FAILED) {
            error_report("Failed to mmap shared memory %s", strerror(errno));
            close(incoming_fd);
            return;
        }
        memory_region_init_ram_ptr(&s->ivshmem, OBJECT(s),
                                   "ivshmem.bar2", s->ivshmem_size, map_ptr);
        qemu_set_ram_fd(s->ivshmem.ram_addr, incoming_fd);
        vmstate_register_ram(&s->ivshmem, DEVICE(s));

        IVSHMEM_DPRINTF("guest h/w addr = %p, size = %" PRIu64 "\n",
                        map_ptr, s->ivshmem_size);

        memory_region_add_subregion(&s->bar, 0, &s->ivshmem);

        return;
    }

    /* each peer has an associated array of eventfds, and we keep
     * track of how many eventfds received so far */

    /* get a new eventfd: */
    if (peer->nb_eventfds >= s->vectors) {
        error_report("Too many eventfd received, device has %d vectors",
                     s->vectors);
        close(incoming_fd);
        return;
    }
    new_eventfd = peer->nb_eventfds++;

    /* this is an eventfd for a particular peer VM */
    IVSHMEM_DPRINTF("eventfds[%" PRId64 "][%d] = %d\n", incoming_posn,
                    new_eventfd, incoming_fd);
    event_notifier_init_fd(&peer->eventfds[new_eventfd], incoming_fd);
    fcntl_setfl(incoming_fd, O_NONBLOCK); /* msix/irqfd poll non block */

    if (incoming_posn == s->vm_id) {
        setup_interrupt(s, new_eventfd);
    }

    if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
        ivshmem_add_eventfd(s, incoming_posn, new_eventfd);
    }
}