/*
 * OCaml stub: unmask event channel [port] on the handle wrapped by [xce].
 * Raises [Failure "evtchn unmask failed"] on error.
 *
 * Fix: register the arguments as GC roots with CAMLparam2/CAMLreturn,
 * matching the convention used by the other stubs in this codebase.
 */
CAMLprim value stub_eventchn_unmask(value xce, value port)
{
	CAMLparam2(xce, port);
	if (xc_evtchn_unmask(_H(xce), Int_val(port)) == -1)
		caml_failwith("evtchn unmask failed");
	CAMLreturn(Val_unit);
}
/*
 * Kick the domain's suspend event channel, then wait until the
 * acknowledgement comes back on that same channel, and re-arm it.
 *
 * Returns 0 on success, -1 on failure.  On the notify and unmask error
 * paths s->errstr is pointed at a static description in errbuf; the
 * poll/pending error path sets no errstr.
 */
static int evtchn_suspend(checkpoint_state* s)
{
    int rc;

    /* Tell the guest to suspend. */
    rc = xc_evtchn_notify(s->xce, s->suspend_evtchn);
    if (rc < 0) {
        snprintf(errbuf, sizeof(errbuf),
                 "failed to notify suspend event channel: %d", rc);
        s->errstr = errbuf;
        return -1;
    }

    /* Poll the evtchn fd and consume pending ports until the suspend
     * channel itself fires; other ports are drained and ignored. */
    do
        if (!(rc = pollfd(s, xc_evtchn_fd(s->xce))))
            rc = xc_evtchn_pending(s->xce);
    while (rc >= 0 && rc != s->suspend_evtchn);

    /* NOTE(review): rc < 0 is the error path from pollfd/pending;
     * rc == 0 would only trip this if port 0 were not the suspend
     * channel — confirm that port 0 is impossible here. */
    if (rc <= 0)
        return -1;

    /* Re-arm the suspend channel for the next notification. */
    if (xc_evtchn_unmask(s->xce, s->suspend_evtchn) < 0) {
        snprintf(errbuf, sizeof(errbuf),
                 "failed to unmask suspend notification channel: %d", rc);
        s->errstr = errbuf;
        return -1;
    }

    return 0;
}
static void wait_for_event(void) { int ret; fd_set inset; evtchn_port_t port; struct timeval tv; int evtchn_fd; if (xce_handle < 0) { nanosleep(&opts.poll_sleep, NULL); return; } evtchn_fd = xc_evtchn_fd(xce_handle); FD_ZERO(&inset); FD_SET(evtchn_fd, &inset); tv.tv_sec = 1; tv.tv_usec = 0; // tv = millis_to_timespec(&opts.poll_sleep); ret = select(evtchn_fd+1, &inset, NULL, NULL, &tv); if ( (ret == 1) && FD_ISSET(evtchn_fd, &inset)) { if ((port = xc_evtchn_pending(xce_handle)) == -1) perror("Failed to read from event fd"); // if (port == virq_port) // printf("got the event I was looking for\r\n"); if (xc_evtchn_unmask(xce_handle, port) == -1) perror("Failed to write to event fd"); } }
/*
 * Client-side event setup: open an evtchn handle, bind to the server's
 * interdomain port and unmask it.
 *
 * Returns 0 on success.  On failure returns -1 with ctrl->event closed
 * and NULLed (and the port unbound if binding had succeeded).
 */
static int init_evt_cli(struct libxenvchan *ctrl, int domain,
                        xentoollog_logger *logger)
{
	evtchn_port_or_error_t local_port = -1;

	ctrl->event = xc_evtchn_open(logger, 0);
	if (!ctrl->event)
		return -1;

	local_port = xc_evtchn_bind_interdomain(ctrl->event, domain,
	                                        ctrl->event_port);
	if (local_port < 0)
		goto fail;
	ctrl->event_port = local_port;

	if (xc_evtchn_unmask(ctrl->event, ctrl->event_port))
		goto fail;

	return 0;

fail:
	if (local_port >= 0)
		xc_evtchn_unbind(ctrl->event, local_port);
	xc_evtchn_close(ctrl->event);
	ctrl->event = NULL;
	return -1;
}
static void handle_aio_events(struct fs_mount *mount) { int fd, ret, count, i, notify; evtchn_port_t port; /* AIO control block for the evtchn file destriptor */ struct aiocb evtchn_cb; const struct aiocb * cb_list[mount->nr_entries]; int request_ids[mount->nr_entries]; /* Prepare the AIO control block for evtchn */ fd = xc_evtchn_fd(mount->evth); bzero(&evtchn_cb, sizeof(struct aiocb)); evtchn_cb.aio_fildes = fd; evtchn_cb.aio_nbytes = sizeof(port); evtchn_cb.aio_buf = &port; assert(aio_read(&evtchn_cb) == 0); wait_again: /* Create list of active AIO requests */ count = 0; for(i=0; i<mount->nr_entries; i++) if(mount->requests[i].active) { cb_list[count] = &mount->requests[i].aiocb; request_ids[count] = i; count++; } /* Add the event channel at the end of the list. Event channel needs to be * handled last as it exits this function. */ cb_list[count] = &evtchn_cb; request_ids[count] = -1; count++; /* Block till an AIO requset finishes, or we get an event */ while(1) { int ret = aio_suspend(cb_list, count, NULL); if (!ret) break; assert(errno == EINTR); } for(i=0; i<count; i++) if(aio_error(cb_list[i]) != EINPROGRESS) { if(request_ids[i] >= 0) dispatch_response(mount, request_ids[i]); else goto read_event_channel; } RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&mount->ring, notify); printf("Pushed responces and notify=%d\n", notify); if(notify) xc_evtchn_notify(mount->evth, mount->local_evtchn); goto wait_again; read_event_channel: assert(aio_return(&evtchn_cb) == sizeof(evtchn_port_t)); assert(xc_evtchn_unmask(mount->evth, mount->local_evtchn) >= 0); }
/*
 * Block until a vchan event is pending, then acknowledge it.
 *
 * Non-stubdom builds rely on xc_evtchn_pending() itself blocking;
 * stubdom builds unmask first (libvchan_prepare_to_select) and then
 * loop on select() over the vchan fd until a port becomes pending,
 * ignoring EINTR.
 *
 * Returns the pending-port result on success, -1 on unmask failure or
 * peer EOF, or select()'s negative return on a non-EINTR select error.
 */
int libvchan_wait(struct libvchan *ctrl)
{
    int ret;
#ifndef CONFIG_STUBDOM
    ret = xc_evtchn_pending(ctrl->evfd);
#else
    int vchan_fd = libvchan_fd_for_select(ctrl);
    fd_set rfds;
    /* Unmask before select() so a notification can wake us. */
    libvchan_prepare_to_select(ctrl);
    while ((ret = xc_evtchn_pending(ctrl->evfd)) < 0) {
        FD_ZERO(&rfds);
        /* NOTE(review): fd 0 (stdin) is also watched — confirm intent. */
        FD_SET(0, &rfds);
        FD_SET(vchan_fd, &rfds);
        ret = select(vchan_fd + 1, &rfds, NULL, NULL, NULL);
        if (ret < 0 && errno != EINTR) {
            perror("select");
            return ret;
        }
    }
#endif
    /* Re-arm the channel; treat unmask failure or peer EOF as an error. */
    if (ret != -1 && xc_evtchn_unmask(ctrl->evfd, ctrl->evport))
        return -1;
    if (ret != -1 && libvchan_is_eof(ctrl))
        return -1;
    return ret;
}
/*
 * Block until an event is pending on the control's event channel, then
 * acknowledge (unmask) it.
 *
 * Returns 0 on success, -1 on error.
 *
 * Fix: the original ignored the return value of xc_evtchn_unmask(); a
 * failed unmask leaves the channel masked and the caller unable to
 * receive further notifications, so report it.
 */
int libxenvchan_wait(struct libxenvchan *ctrl)
{
	int ret = xc_evtchn_pending(ctrl->event);
	if (ret < 0)
		return -1;
	if (xc_evtchn_unmask(ctrl->event, ret))
		return -1;
	return 0;
}
/*
 * OCaml stub: unmask event channel [_port] on the handle wrapped by
 * [xce].  Raises [Failure "evtchn unmask failed"] on error.
 */
CAMLprim value stub_eventchn_unmask(value xce, value _port)
{
	CAMLparam2(xce, _port);

	evtchn_port_t p = Int_val(_port);
	if (xc_evtchn_unmask(_H(xce), p) != 0)
		caml_failwith("evtchn unmask failed");

	CAMLreturn(Val_unit);
}
/*
 * OCaml stub: unmask event channel [port].  On failure, logs the system
 * error to stderr and raises Failure with the errno description.
 *
 * Fix: save errno before calling perror() — perror performs stderr I/O
 * which may clobber errno before strerror() reads it.
 */
CAMLprim value stub_eventchn_unmask(value xce, value port)
{
	CAMLparam2(xce, port);
	if (xc_evtchn_unmask(_H(xce), Int_val(port)) == -1) {
		int saved_errno = errno;
		perror("xc_evtchn_unmask");
		caml_failwith(strerror(saved_errno));
	}
	CAMLreturn(Val_unit);
}
static void xen_be_evtchn_event(void *opaque) { struct XenDevice *xendev = opaque; evtchn_port_t port; port = xc_evtchn_pending(xendev->evtchndev); if (port != xendev->local_port) { xen_be_printf(xendev, 0, "xc_evtchn_pending returned %d (expected %d)\n", port, xendev->local_port); return; } xc_evtchn_unmask(xendev->evtchndev, port); if (xendev->ops->event) { xendev->ops->event(xendev); } }
/*
 * Wait until the domain acknowledges suspend on its suspend event
 * channel, draining other pending ports, then re-arm the channel.
 *
 * Returns 0 on success (unmask failure is deliberately non-fatal — it
 * is harmless for a one-off suspend), -1 on a polling error.
 *
 * Fix: the original unmask-failure message printed rc, which at that
 * point held the loop's result (== suspend_evtchn, the port number),
 * not the unmask error code.
 */
int xc_await_suspend(xc_interface *xch, xc_evtchn *xce, int suspend_evtchn)
{
    int rc;

    do {
        rc = xc_evtchn_pending(xce);
        if (rc < 0) {
            ERROR("error polling suspend notification channel: %d", rc);
            return -1;
        }
    } while (rc != suspend_evtchn);

    /* Best-effort re-arm: harmless for one-off suspend. */
    rc = xc_evtchn_unmask(xce, suspend_evtchn);
    if (rc < 0)
        ERROR("failed to unmask suspend notification channel: %d", rc);

    return 0;
}
/*
 * Wait until the domain acknowledges suspend on si.suspend_evtchn,
 * draining other pending ports, then re-arm the channel.
 *
 * Returns 0 on success (unmask failure is deliberately non-fatal — it
 * is harmless for a one-off suspend), -1 on a polling error.
 *
 * Fix: the original unmask-failure message printed rc, which at that
 * point held the loop's result (== si.suspend_evtchn, the port number),
 * not the unmask error code.
 */
static int await_suspend(void)
{
    int rc;

    do {
        rc = xc_evtchn_pending(si.xce);
        if (rc < 0) {
            warnx("error polling suspend notification channel: %d", rc);
            return -1;
        }
    } while (rc != si.suspend_evtchn);

    /* Best-effort re-arm: harmless for one-off suspend. */
    rc = xc_evtchn_unmask(si.xce, si.suspend_evtchn);
    if (rc < 0)
        warnx("failed to unmask suspend notification channel: %d", rc);

    return 0;
}
/*
 * Block until a vchan event arrives (Windows build: errors are reported
 * via GetLastError()), handling event-ring overflow by resetting the
 * ring, then acknowledge the event.
 *
 * Returns the pending/reset result on success, -1 on unmask failure or
 * peer EOF.
 */
int libvchan_wait(struct libvchan *ctrl)
{
    int ret;
    ret = xc_evtchn_pending_with_flush(ctrl->evfd);
    // I don't know how to avoid evtchn ring buffer overflow without
    // introducing any race condition (in qrexec-agent code). Because of that,
    // handle overflow with ring reset - because we just received some events
    // (overflow means ring full, so some events was recorded...) the reset
    // isn't critical here - always after libvchan_wait we check if there is
    // something to read from the vchan
    if (ret == -1 && GetLastError() == ERROR_IO_DEVICE)
        ret = xc_evtchn_reset(ctrl->evfd);
    /* Re-arm the channel; treat unmask failure or peer EOF as an error. */
    if (ret != -1 && xc_evtchn_unmask(ctrl->evfd, ctrl->evport))
        return -1;
    if (ret != -1 && libvchan_is_eof(ctrl))
        return -1;
    return ret;
}
/* Process events from the frontend event channel */ static void xenfb_dispatch_channel(void *opaque) { struct xenfb *xenfb = (struct xenfb *)opaque; evtchn_port_t port; port = xc_evtchn_pending(xenfb->evt_xch); if (port == -1) { xenfb_shutdown(xenfb); exit(1); } if (port == xenfb->fb.port) xenfb_on_fb_event(xenfb); else if (port == xenfb->kbd.port) xenfb_on_kbd_event(xenfb); if (xc_evtchn_unmask(xenfb->evt_xch, port) == -1) { xenfb_shutdown(xenfb); exit(1); } }
/* * XXX only called by tapdisk_xenio_ctx_ring_event */ static inline struct td_xenblkif * xenio_pending_blkif(struct td_xenio_ctx * const ctx) { evtchn_port_or_error_t port; struct td_xenblkif *blkif; int err; ASSERT(ctx); /* * Get the local port for which there is a pending event. */ port = xc_evtchn_pending(ctx->xce_handle); if (port == -1) { /* TODO log error */ return NULL; } /* * Find the block interface with that local port. */ tapdisk_xenio_ctx_find_blkif(ctx, blkif, blkif->port == port); if (blkif) { err = xc_evtchn_unmask(ctx->xce_handle, port); if (err) { /* TODO log error */ return NULL; } } /* * TODO Is it possible to have an pending event channel but no block * interface associated with it? */ return blkif; }
/// Unmasks event channel; must be called before calling select(), and only then void libvchan_prepare_to_select(struct libvchan *ctrl) { xc_evtchn_unmask(ctrl->evfd, ctrl->evport); }