static void handle_aio_events(struct fs_mount *mount)
{
    int fd, ret, count, i, notify;
    evtchn_port_t port;
    /* AIO control block for the evtchn file descriptor */
    struct aiocb evtchn_cb;
    const struct aiocb *cb_list[mount->nr_entries];
    int request_ids[mount->nr_entries];

    /* Prepare the AIO control block for evtchn */
    fd = xc_evtchn_fd(mount->evth);
    bzero(&evtchn_cb, sizeof(struct aiocb));
    evtchn_cb.aio_fildes = fd;
    evtchn_cb.aio_nbytes = sizeof(port);
    evtchn_cb.aio_buf = &port;
    assert(aio_read(&evtchn_cb) == 0);

wait_again:
    /* Create list of active AIO requests */
    count = 0;
    for(i=0; i<mount->nr_entries; i++)
        if(mount->requests[i].active)
        {
            cb_list[count] = &mount->requests[i].aiocb;
            request_ids[count] = i;
            count++;
        }
    /* Add the event channel at the end of the list. The event channel needs
     * to be handled last as it exits this function. */
    cb_list[count] = &evtchn_cb;
    request_ids[count] = -1;
    count++;

    /* Block until an AIO request finishes, or we get an event */
    while(1)
    {
        ret = aio_suspend(cb_list, count, NULL);
        if (!ret)
            break;
        assert(errno == EINTR);
    }

    for(i=0; i<count; i++)
        if(aio_error(cb_list[i]) != EINPROGRESS)
        {
            if(request_ids[i] >= 0)
                dispatch_response(mount, request_ids[i]);
            else
                goto read_event_channel;
        }

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&mount->ring, notify);
    printf("Pushed responses and notify=%d\n", notify);
    if(notify)
        xc_evtchn_notify(mount->evth, mount->local_evtchn);

    goto wait_again;

read_event_channel:
    assert(aio_return(&evtchn_cb) == sizeof(evtchn_port_t));
    assert(xc_evtchn_unmask(mount->evth, mount->local_evtchn) >= 0);
}
static void wait_for_event(void)
{
    int ret;
    fd_set inset;
    evtchn_port_t port;
    struct timeval tv;
    int evtchn_fd;

    if (xce_handle < 0) {
        nanosleep(&opts.poll_sleep, NULL);
        return;
    }

    evtchn_fd = xc_evtchn_fd(xce_handle);

    FD_ZERO(&inset);
    FD_SET(evtchn_fd, &inset);
    tv.tv_sec = 1;
    tv.tv_usec = 0;
    // tv = millis_to_timespec(&opts.poll_sleep);
    ret = select(evtchn_fd+1, &inset, NULL, NULL, &tv);

    if ( (ret == 1) && FD_ISSET(evtchn_fd, &inset)) {
        if ((port = xc_evtchn_pending(xce_handle)) == -1)
            perror("Failed to read from event fd");

        // if (port == virq_port)
        //     printf("got the event I was looking for\r\n");

        if (xc_evtchn_unmask(xce_handle, port) == -1)
            perror("Failed to write to event fd");
    }
}
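Most of these call sites repeat the same pattern: fetch the descriptor with xc_evtchn_fd(), wait for it to become readable with select()/poll(), read the pending port with xc_evtchn_pending(), and re-arm delivery with xc_evtchn_unmask(). A minimal sketch of that loop, assuming the xc_evtchn*-handle flavour of the libxc API (xenctrl.h) and a hypothetical handle_port() callback that is not part of libxc:

#include <poll.h>
#include <xenctrl.h>

/* Minimal sketch: wait for one event on an evtchn handle and acknowledge it.
 * Assumes `xce` came from xc_evtchn_open(); handle_port() is a hypothetical
 * application callback, not a libxc function. */
static int wait_one_event(xc_evtchn *xce, void (*handle_port)(evtchn_port_t))
{
    struct pollfd pfd = {
        .fd = xc_evtchn_fd(xce),   /* fd suitable for poll()/select() */
        .events = POLLIN,
    };
    evtchn_port_or_error_t port;

    if (poll(&pfd, 1, -1) <= 0)
        return -1;                 /* error or interrupted */

    port = xc_evtchn_pending(xce); /* which port fired? */
    if (port < 0)
        return -1;

    handle_port(port);

    /* Delivery stays masked until the port is explicitly unmasked again. */
    return xc_evtchn_unmask(xce, port);
}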
/* Register a QEMU graphical console, and key/mouse handler,
 * connecting up their events to the frontend */
static int xenfb_register_console(struct xenfb *xenfb)
{
    /* Register our keyboard & mouse handlers */
    qemu_add_kbd_event_handler(xenfb_key_event, xenfb);
    qemu_add_mouse_event_handler(xenfb_mouse_event, xenfb,
                                 xenfb->abs_pointer_wanted,
                                 "Xen PVFB Mouse");

    /* Tell QEMU to allocate a graphical console */
    graphic_console_init(xenfb->ds,
                         xenfb_update,
                         xenfb_invalidate,
                         xenfb_screen_dump,
                         xenfb);
    if (xenfb->ds->dpy_resize_shared)
        dpy_resize_shared(xenfb->ds, xenfb->width, xenfb->height,
                          xenfb->depth, xenfb->row_stride,
                          xenfb->pixels + xenfb->offset);
    else
        dpy_resize(xenfb->ds, xenfb->width, xenfb->height);

    if (qemu_set_fd_handler2(xc_evtchn_fd(xenfb->evt_xch),
                             NULL, xenfb_dispatch_channel, NULL, xenfb) < 0)
        return -1;
    if (qemu_set_fd_handler2(xs_fileno(xenfb->xsh),
                             NULL, xenfb_dispatch_store, NULL, xenfb) < 0)
        return -1;

    fprintf(stderr, "Xen Framebuffer registered\n");
    return 0;
}
static int evtchn_suspend(checkpoint_state* s)
{
    int rc;

    rc = xc_evtchn_notify(s->xce, s->suspend_evtchn);
    if (rc < 0) {
        snprintf(errbuf, sizeof(errbuf),
                 "failed to notify suspend event channel: %d", rc);
        s->errstr = errbuf;

        return -1;
    }

    do
        if (!(rc = pollfd(s, xc_evtchn_fd(s->xce))))
            rc = xc_evtchn_pending(s->xce);
    while (rc >= 0 && rc != s->suspend_evtchn);
    if (rc <= 0)
        return -1;

    if (xc_evtchn_unmask(s->xce, s->suspend_evtchn) < 0) {
        snprintf(errbuf, sizeof(errbuf),
                 "failed to unmask suspend notification channel: %d", rc);
        s->errstr = errbuf;

        return -1;
    }

    return 0;
}
CAMLprim value stub_eventchn_fd(value xce)
{
    int fd;

    fd = xc_evtchn_fd(_H(xce));
    if (fd == -1)
        caml_failwith("evtchn fd failed");

    return Val_int(fd);
}
void xen_be_unbind_evtchn(struct XenDevice *xendev)
{
    if (xendev->local_port == -1) {
        return;
    }
    qemu_set_fd_handler(xc_evtchn_fd(xendev->evtchndev), NULL, NULL, NULL);
    xc_evtchn_unbind(xendev->evtchndev, xendev->local_port);
    xen_be_printf(xendev, 2, "unbind evtchn port %d\n", xendev->local_port);
    xendev->local_port = -1;
}
CAMLprim value stub_eventchn_fd(value xce)
{
    CAMLparam1(xce);
    int fd;

    fd = xc_evtchn_fd(_H(xce));
    if (fd == -1) {
        perror("xc_evtchn_fd");
        caml_failwith(strerror(errno));
    }

    CAMLreturn(Val_int(fd));
}
CAMLprim value stub_eventchn_fd(value xce)
{
    CAMLparam1(xce);
    CAMLlocal1(result);
    int fd;

    fd = xc_evtchn_fd(_H(xce));
    if (fd == -1)
        caml_failwith("evtchn fd failed");
    result = Val_int(fd);

    CAMLreturn(result);
}
int xen_be_bind_evtchn(struct XenDevice *xendev)
{
    if (xendev->local_port != -1) {
        return 0;
    }
    xendev->local_port = xc_evtchn_bind_interdomain
        (xendev->evtchndev, xendev->dom, xendev->remote_port);
    if (xendev->local_port == -1) {
        xen_be_printf(xendev, 0, "xc_evtchn_bind_interdomain failed\n");
        return -1;
    }
    xen_be_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port);
    qemu_set_fd_handler(xc_evtchn_fd(xendev->evtchndev),
                        xen_be_evtchn_event, NULL, xendev);
    return 0;
}
int libxenvchan_fd_for_select(struct libxenvchan *ctrl)
{
    return xc_evtchn_fd(ctrl->event);
}
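For callers, the descriptor returned here is only a wakeup source; the payload still moves through the vchan ring. A minimal usage sketch, assuming the libxenvchan_wait(), libxenvchan_data_ready() and libxenvchan_read() helpers from the same library, with error handling trimmed:

#include <poll.h>
#include <libxenvchan.h>

/* Sketch: mix a vchan connection into a poll() loop alongside other fds.
 * Assumes the libxenvchan_wait()/libxenvchan_data_ready()/libxenvchan_read()
 * helpers; error handling trimmed for brevity. */
static void vchan_poll_once(struct libxenvchan *ctrl)
{
    char buf[256];
    struct pollfd pfd = {
        .fd = libxenvchan_fd_for_select(ctrl), /* the evtchn fd shown above */
        .events = POLLIN,
    };

    if (poll(&pfd, 1, -1) <= 0)
        return;

    /* Acknowledge and re-arm the event before checking the ring. */
    if (libxenvchan_wait(ctrl) < 0)
        return;

    while (libxenvchan_data_ready(ctrl) > 0) {
        int len = libxenvchan_read(ctrl, buf, sizeof(buf));
        if (len <= 0)
            break;
        /* ... consume len bytes of buf ... */
    }
}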
static int xenpaging_wait_for_event_or_timeout(struct xenpaging *paging)
{
    xc_interface *xch = paging->xc_handle;
    xc_evtchn *xce = paging->vm_event.xce_handle;
    char **vec, *val;
    unsigned int num;
    struct pollfd fd[2];
    int port;
    int rc;
    int timeout;

    /* Wait for event channel and xenstore */
    fd[0].fd = xc_evtchn_fd(xce);
    fd[0].events = POLLIN | POLLERR;
    fd[1].fd = xs_fileno(paging->xs_handle);
    fd[1].events = POLLIN | POLLERR;

    /* No timeout while page-out is still in progress */
    timeout = paging->use_poll_timeout ? 100 : 0;
    rc = poll(fd, 2, timeout);
    if ( rc < 0 )
    {
        if (errno == EINTR)
            return 0;

        PERROR("Poll exited with an error");
        return -1;
    }

    /* First check for guest shutdown */
    if ( rc && fd[1].revents & POLLIN )
    {
        DPRINTF("Got event from xenstore\n");
        vec = xs_read_watch(paging->xs_handle, &num);
        if ( vec )
        {
            DPRINTF("path '%s' token '%s'\n", vec[XS_WATCH_PATH], vec[XS_WATCH_TOKEN]);
            if ( strcmp(vec[XS_WATCH_TOKEN], watch_token) == 0 )
            {
                /* If our guest disappeared, set interrupt flag and fall through */
                if ( xs_is_domain_introduced(paging->xs_handle, paging->vm_event.domain_id) == false )
                {
                    xs_unwatch(paging->xs_handle, "@releaseDomain", watch_token);
                    interrupted = SIGQUIT;
                    /* No further poll result processing */
                    rc = 0;
                }
            }
            else if ( strcmp(vec[XS_WATCH_PATH], watch_target_tot_pages) == 0 )
            {
                int ret, target_tot_pages;

                val = xs_read(paging->xs_handle, XBT_NULL, vec[XS_WATCH_PATH], NULL);
                if ( val )
                {
                    ret = sscanf(val, "%d", &target_tot_pages);
                    if ( ret > 0 )
                    {
                        /* KiB to pages */
                        target_tot_pages >>= 2;
                        if ( target_tot_pages < 0 || target_tot_pages > paging->max_pages )
                            target_tot_pages = paging->max_pages;
                        paging->target_tot_pages = target_tot_pages;
                        /* Disable poll() delay while new target is not yet reached */
                        paging->use_poll_timeout = 0;
                        DPRINTF("new target_tot_pages %d\n", target_tot_pages);
                    }
                    free(val);
                }
            }
/// The fd to use for select() set
EVTCHN libvchan_fd_for_select(struct libvchan *ctrl)
{
    return xc_evtchn_fd(ctrl->evfd);
}