/*
 * Set up the main-loop wakeup mechanism using a plain pipe.
 *
 * Creates a pipe, makes both ends non-blocking, registers the read end
 * with the fd handler machinery so qemu_event_read() drains pending
 * wakeups, and publishes the write end in io_thread_fd.
 *
 * Returns 0 on success, a negative errno value on failure.
 */
static int qemu_event_init(void)
{
    int err;
    int fds[2];

    err = pipe(fds);
    if (err == -1) {
        return -errno;
    }

    err = fcntl_setfl(fds[0], O_NONBLOCK);
    if (err < 0) {
        goto fail;
    }

    err = fcntl_setfl(fds[1], O_NONBLOCK);
    if (err < 0) {
        goto fail;
    }

    /* Cast via intptr_t, the portable int-through-void* idiom used by
     * the other handlers in this file (was (unsigned long), which is
     * narrower than a pointer on LLP64 platforms). */
    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
                         (void *)(intptr_t)fds[0]);

    io_thread_fd = fds[1];
    return 0;

fail:
    close(fds[0]);
    close(fds[1]);
    return err;
}
/*
 * Set up the main-loop wakeup mechanism on top of qemu_eventfd().
 *
 * Both descriptors are switched to non-blocking mode; the read side is
 * registered so qemu_event_read() drains wakeups, and the write side is
 * stored in io_thread_fd for notification.
 *
 * Returns 0 on success, a negative errno value on failure.
 */
static int qemu_event_init(void)
{
    int fds[2];
    int ret;

    ret = qemu_eventfd(fds);
    if (ret == -1) {
        return -errno;
    }

    ret = fcntl_setfl(fds[0], O_NONBLOCK);
    if (ret < 0) {
        goto fail;
    }
    ret = fcntl_setfl(fds[1], O_NONBLOCK);
    if (ret < 0) {
        goto fail;
    }

    /* Stash the fd itself in the opaque pointer for the handler. */
    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
                         (void *)(intptr_t)fds[0]);
    io_thread_fd = fds[1];
    return 0;

fail:
    close(fds[0]);
    close(fds[1]);
    return ret;
}
static int qemu_signal_init(void) { int sigfd; sigset_t set; /* * SIG_IPI must be blocked in the main thread and must not be caught * by sigwait() in the signal thread. Otherwise, the cpu thread will * not catch it reliably. */ sigemptyset(&set); sigaddset(&set, SIG_IPI); sigaddset(&set, SIGIO); sigaddset(&set, SIGALRM); sigaddset(&set, SIGBUS); pthread_sigmask(SIG_BLOCK, &set, NULL); sigdelset(&set, SIG_IPI); sigfd = qemu_signalfd(&set); if (sigfd == -1) { fprintf(stderr, "failed to create signalfd\n"); return -errno; } fcntl_setfl(sigfd, O_NONBLOCK); qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL, (void *)(intptr_t)sigfd); return 0; }
static void process_msg_connect(IVShmemState *s, uint16_t posn, int fd, Error **errp) { Peer *peer = &s->peers[posn]; int vector; /* * The N-th connect message for this peer comes with the file * descriptor for vector N-1. Count messages to find the vector. */ if (peer->nb_eventfds >= s->vectors) { error_setg(errp, "Too many eventfd received, device has %d vectors", s->vectors); close(fd); return; } vector = peer->nb_eventfds++; IVSHMEM_DPRINTF("eventfds[%d][%d] = %d\n", posn, vector, fd); event_notifier_init_fd(&peer->eventfds[vector], fd); fcntl_setfl(fd, O_NONBLOCK); /* msix/irqfd poll non block */ if (posn == s->vm_id) { setup_interrupt(s, vector, errp); /* TODO do we need to handle the error? */ } if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) { ivshmem_add_eventfd(s, posn, vector); } }
static int qemu_signal_init(Error **errp) { int sigfd; sigset_t set; /* * SIG_IPI must be blocked in the main thread and must not be caught * by sigwait() in the signal thread. Otherwise, the cpu thread will * not catch it reliably. */ sigemptyset(&set); sigaddset(&set, SIG_IPI); sigaddset(&set, SIGIO); sigaddset(&set, SIGALRM); sigaddset(&set, SIGBUS); /* SIGINT cannot be handled via signalfd, so that ^C can be used * to interrupt QEMU when it is being run under gdb. SIGHUP and * SIGTERM are also handled asynchronously, even though it is not * strictly necessary, because they use the same handler as SIGINT. */ pthread_sigmask(SIG_BLOCK, &set, NULL); sigdelset(&set, SIG_IPI); sigfd = qemu_signalfd(&set); if (sigfd == -1) { error_setg_errno(errp, errno, "failed to create signalfd"); return -errno; } fcntl_setfl(sigfd, O_NONBLOCK); qemu_set_fd_handler(sigfd, sigfd_handler, NULL, (void *)(intptr_t)sigfd); return 0; }
/*
 * Initialize an event notifier, preferring eventfd(2) and falling back
 * to a non-blocking pipe pair where eventfd is unavailable (ENOSYS).
 *
 * If @active is non-zero the notifier starts in the signalled state.
 * Returns 0 on success, a negative errno value on failure.
 */
int event_notifier_init(EventNotifier *e, int active)
{
    int fds[2];
    int ret;

#ifdef CONFIG_EVENTFD
    ret = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
#else
    ret = -1;
    errno = ENOSYS;
#endif
    if (ret < 0) {
        /* Any error other than "not implemented" is fatal. */
        if (errno != ENOSYS) {
            return -errno;
        }
        /* Emulate eventfd semantics with a pipe pair. */
        if (qemu_pipe(fds) < 0) {
            return -errno;
        }
        ret = fcntl_setfl(fds[0], O_NONBLOCK);
        if (ret < 0) {
            ret = -errno;
            goto fail;
        }
        ret = fcntl_setfl(fds[1], O_NONBLOCK);
        if (ret < 0) {
            ret = -errno;
            goto fail;
        }
        e->rfd = fds[0];
        e->wfd = fds[1];
    } else {
        /* A single eventfd serves as both the read and write side. */
        e->rfd = e->wfd = ret;
    }

    if (active) {
        event_notifier_set(e);
    }
    return 0;

fail:
    close(fds[0]);
    close(fds[1]);
    return ret;
}
/*
 * Arrange for SIGIO to be delivered to this process when @fd becomes
 * ready, with host_alarm_handler installed as the signal handler.
 */
static void enable_sigio_timer(int fd)
{
    struct sigaction act;

    /* timer signal */
    /* Zero the whole struct first: sigaction has fields beyond sa_mask,
     * sa_flags and sa_handler (e.g. sa_restorer on some platforms), and
     * the original code left them uninitialized. */
    memset(&act, 0, sizeof(act));
    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGIO, &act, NULL);

    /* Enable SIGIO-on-ready and direct it at this process. */
    fcntl_setfl(fd, O_ASYNC);
    fcntl(fd, F_SETOWN, getpid());
}
/*
 * Configure the main thread's signal mask and route the chosen signals
 * through a signalfd serviced by the main loop.
 *
 * The set built here is both blocked (so default delivery never fires)
 * and passed to qemu_signalfd(), which is why the mask manipulation
 * order below matters.
 *
 * Returns 0 on success, a negative errno value if the signalfd cannot
 * be created.
 */
static int qemu_signal_init(void)
{
    int sigfd;
    sigset_t set;

#ifdef CONFIG_IOTHREAD
    /* SIGUSR2 used by posix-aio-compat.c */
    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    /*
     * SIG_IPI must be blocked in the main thread and must not be caught
     * by sigwait() in the signal thread. Otherwise, the cpu thread will
     * not catch it reliably.
     */
    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    /* Rebuild the set: these are the signals the signalfd will carry
     * (SIG_IPI deliberately excluded per the comment above). */
    sigemptyset(&set);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIGBUS);
#else
    sigemptyset(&set);
    sigaddset(&set, SIGBUS);
    if (kvm_enabled()) {
        /*
         * We need to process timer signals synchronously to avoid a race
         * between exit_request check and KVM vcpu entry.
         */
        sigaddset(&set, SIGIO);
        sigaddset(&set, SIGALRM);
    }
#endif
    /* Block the set so delivery happens only via the signalfd below. */
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    sigfd = qemu_signalfd(&set);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl_setfl(sigfd, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(intptr_t)sigfd);

    return 0;
}
/*
 * Create a non-blocking signalfd for @mask and register sigfd_handler
 * to service it from the main loop.
 *
 * Returns 0 on success, a negative errno value on failure.
 */
static int qemu_signalfd_init(sigset_t mask)
{
    int sigfd;

    sigfd = qemu_signalfd(&mask);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl_setfl(sigfd, O_NONBLOCK);

    /* Cast via intptr_t for consistency with the other fd handlers in
     * this file (was (unsigned long), which is narrower than a pointer
     * on LLP64 platforms). */
    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(intptr_t)sigfd);

    return 0;
}
int qemu_http_init(void) { QemuHttpState *state = (QemuHttpState *)qemu_mallocz(sizeof(QemuHttpState)); ghttp_state = state; state->listen_fd = 0; memset(state->clients, 0, sizeof(state->clients)); state->connections = 0; state->listen_fd = qemu_socket(AF_INET, SOCK_STREAM, 0); if (state->listen_fd < 0) { return 0; } const int on = 1; setsockopt(state->listen_fd, SOL_SOCKET, SO_REUSEADDR, (void*)&on, sizeof(on)); struct sockaddr_in laddr; memset(&laddr, 0, sizeof(laddr)); laddr.sin_family = AF_INET; laddr.sin_port = htons(DEFAULT_HTTP_PORT); laddr.sin_addr.s_addr = INADDR_ANY; if (bind(state->listen_fd, (struct sockaddr *)&laddr, sizeof(laddr)) == -1) { return 0; } if (listen(state->listen_fd, QEMU_MAX_HTTP_CONNECTIONS) == -1) { return 0; } fcntl_setfl(state->listen_fd, O_NONBLOCK); qemu_set_fd_handler(state->listen_fd, accept_handler, NULL, (void *)state); return 1; }
static void accept_handler(void *opaque) { QemuHttpState *state = (QemuHttpState *)opaque; if (state->connections == QEMU_MAX_HTTP_CONNECTIONS) return; int fd = qemu_accept(state->listen_fd, NULL, 0); if (fd < 0) return; state->connections++; int i; int mindex = 0; for (i = 0; i < QEMU_MAX_HTTP_CONNECTIONS; ++i) { if (!state->clients[i].open) { mindex = i; break; } } QemuHttpConnection *conn = &state->clients[mindex]; conn->fd = fd; conn->open = 1; conn->state = NEW_REQUEST; conn->pos = 0; conn->page_complete = 0; conn->page_pos = -1; conn->out_size = 0; memset(conn->page, 0, sizeof(conn->page)); fcntl_setfl(conn->fd, O_NONBLOCK); qemu_set_fd_handler(conn->fd, client_handler, NULL, (void *)conn); }
/*
 * Prepare to reopen a raw file with new flags.
 *
 * Allocates state->opaque (a BDRVRawReopenState) and obtains a new fd
 * for the requested flags: first by duplicating and fcntl()-adjusting
 * the existing fd when only fcntl-settable flags differ, otherwise by
 * a fresh qemu_open() of the same filename.
 *
 * Returns 0 on success, -1 on failure.  NOTE(review): @queue and @errp
 * are unused here — presumably handled by the generic reopen machinery;
 * confirm against the callers.
 */
static int raw_reopen_prepare(BDRVReopenState *state, BlockReopenQueue *queue,
                              Error **errp)
{
    BDRVRawState *s;
    BDRVRawReopenState *raw_s;
    int ret = 0;

    assert(state != NULL);
    assert(state->bs != NULL);

    s = state->bs->opaque;

    state->opaque = g_malloc0(sizeof(BDRVRawReopenState));
    raw_s = state->opaque;

#ifdef CONFIG_LINUX_AIO
    raw_s->use_aio = s->use_aio;

    /* we can use s->aio_ctx instead of a copy, because the use_aio flag is
     * valid in the 'false' condition even if aio_ctx is set, and raw_set_aio()
     * won't override aio_ctx if aio_ctx is non-NULL */
    if (raw_set_aio(&s->aio_ctx, &raw_s->use_aio, state->flags)) {
        return -1;
    }
#endif

    /* Floppy/CD-ROM host devices are opened non-blocking. */
    if (s->type == FTYPE_FD || s->type == FTYPE_CD) {
        raw_s->open_flags |= O_NONBLOCK;
    }

    raw_parse_flags(state->flags, &raw_s->open_flags);

    raw_s->fd = -1;

    /* Flags that fcntl(F_SETFL) can change on an already-open fd; if
     * the old and new flags differ only in these, a dup + fcntl is
     * enough and we avoid reopening the file by name. */
    int fcntl_flags = O_APPEND | O_NONBLOCK;
#ifdef O_NOATIME
    fcntl_flags |= O_NOATIME;
#endif

#ifdef O_ASYNC
    /* Not all operating systems have O_ASYNC, and those that don't
     * will not let us track the state into raw_s->open_flags (typically
     * you achieve the same effect with an ioctl, for example I_SETSIG
     * on Solaris). But we do not use O_ASYNC, so that's fine.
     */
    assert((s->open_flags & O_ASYNC) == 0);
#endif

    if ((raw_s->open_flags & ~fcntl_flags) == (s->open_flags & ~fcntl_flags)) {
        /* dup the original fd */
        /* TODO: use qemu fcntl wrapper */
#ifdef F_DUPFD_CLOEXEC
        raw_s->fd = fcntl(s->fd, F_DUPFD_CLOEXEC, 0);
#else
        raw_s->fd = dup(s->fd);
        if (raw_s->fd != -1) {
            qemu_set_cloexec(raw_s->fd);
        }
#endif
        /* Apply the new fcntl-settable flags to the duplicate; on
         * failure discard it and fall through to the open-by-name path. */
        if (raw_s->fd >= 0) {
            ret = fcntl_setfl(raw_s->fd, raw_s->open_flags);
            if (ret) {
                qemu_close(raw_s->fd);
                raw_s->fd = -1;
            }
        }
    }

    /* If we cannot use fcntl, or fcntl failed, fall back to qemu_open() */
    if (raw_s->fd == -1) {
        assert(!(raw_s->open_flags & O_CREAT));
        raw_s->fd = qemu_open(state->bs->filename, raw_s->open_flags);
        if (raw_s->fd == -1) {
            ret = -1;
        }
    }
    return ret;
}
/*
 * Chardev read callback for the ivshmem server protocol.
 *
 * Each server message is a little-endian int64 position, optionally
 * accompanied by an SCM_RIGHTS file descriptor:
 *   posn == -1, fd valid   -> the shared-memory region fd
 *   posn >= 0,  fd == -1   -> our own ID (first time) or a departed peer
 *   posn >= 0,  fd valid   -> an eventfd for that peer's next vector
 *
 * Takes ownership of the received fd on every path (closing it on
 * error).
 */
static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
{
    IVShmemState *s = opaque;
    int incoming_fd;
    int new_eventfd;
    int64_t incoming_posn;
    Error *err = NULL;
    Peer *peer;

    /* Accumulate bytes until a full int64 position has arrived. */
    if (!fifo_update_and_get_i64(s, buf, size, &incoming_posn)) {
        return;
    }

    if (incoming_posn < -1) {
        IVSHMEM_DPRINTF("invalid incoming_posn %" PRId64 "\n", incoming_posn);
        return;
    }

    /* pick off s->server_chr->msgfd and store it, posn should accompany msg */
    incoming_fd = qemu_chr_fe_get_msgfd(s->server_chr);
    IVSHMEM_DPRINTF("posn is %" PRId64 ", fd is %d\n",
                    incoming_posn, incoming_fd);

    /* make sure we have enough space for this peer */
    if (incoming_posn >= s->nb_peers) {
        if (resize_peers(s, incoming_posn + 1) < 0) {
            error_report("failed to resize peers array");
            if (incoming_fd != -1) {
                close(incoming_fd);
            }
            return;
        }
    }

    peer = &s->peers[incoming_posn];

    if (incoming_fd == -1) {
        /* if posn is positive and unseen before then this is our posn*/
        if (incoming_posn >= 0 && s->vm_id == -1) {
            /* receive our posn */
            s->vm_id = incoming_posn;
        } else {
            /* otherwise an fd == -1 means an existing peer has gone away */
            IVSHMEM_DPRINTF("posn %" PRId64 " has gone away\n", incoming_posn);
            close_peer_eventfds(s, incoming_posn);
        }
        return;
    }

    /* if the position is -1, then it's shared memory region fd */
    if (incoming_posn == -1) {
        void * map_ptr;

        /* The region fd must only arrive once. */
        if (memory_region_is_mapped(&s->ivshmem)) {
            error_report("shm already initialized");
            close(incoming_fd);
            return;
        }

        if (check_shm_size(s, incoming_fd, &err) == -1) {
            error_report_err(err);
            close(incoming_fd);
            return;
        }

        /* mmap the region and map into the BAR2 */
        map_ptr = mmap(0, s->ivshmem_size, PROT_READ|PROT_WRITE, MAP_SHARED,
                       incoming_fd, 0);
        if (map_ptr == MAP_FAILED) {
            error_report("Failed to mmap shared memory %s", strerror(errno));
            close(incoming_fd);
            return;
        }
        memory_region_init_ram_ptr(&s->ivshmem, OBJECT(s),
                                   "ivshmem.bar2", s->ivshmem_size, map_ptr);
        qemu_set_ram_fd(s->ivshmem.ram_addr, incoming_fd);
        vmstate_register_ram(&s->ivshmem, DEVICE(s));
        IVSHMEM_DPRINTF("guest h/w addr = %p, size = %" PRIu64 "\n",
                        map_ptr, s->ivshmem_size);
        memory_region_add_subregion(&s->bar, 0, &s->ivshmem);
        return;
    }

    /* each peer has an associated array of eventfds, and we keep
     * track of how many eventfds received so far */
    /* get a new eventfd: */
    if (peer->nb_eventfds >= s->vectors) {
        error_report("Too many eventfd received, device has %d vectors",
                     s->vectors);
        close(incoming_fd);
        return;
    }
    new_eventfd = peer->nb_eventfds++;

    /* this is an eventfd for a particular peer VM */
    IVSHMEM_DPRINTF("eventfds[%" PRId64 "][%d] = %d\n", incoming_posn,
                    new_eventfd, incoming_fd);
    event_notifier_init_fd(&peer->eventfds[new_eventfd], incoming_fd);
    fcntl_setfl(incoming_fd, O_NONBLOCK); /* msix/irqfd poll non block */

    /* An eventfd for our own ID backs one of our interrupt vectors. */
    if (incoming_posn == s->vm_id) {
        setup_interrupt(s, new_eventfd);
    }

    if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
        ivshmem_add_eventfd(s, incoming_posn, new_eventfd);
    }
}