/*
 * Client-side event channel setup: open an evtchn handle, bind to the
 * server's port in @domain, and unmask the resulting local port.
 *
 * On entry ctrl->event_port holds the remote (server) port; on success it
 * is replaced by the bound local port and 0 is returned.  On failure every
 * partially acquired resource is released, ctrl->event is reset to NULL,
 * and -1 is returned.
 */
static int init_evt_cli(struct libxenvchan *ctrl, int domain,
                        xentoollog_logger *logger)
{
    evtchn_port_or_error_t local_port;

    ctrl->event = xc_evtchn_open(logger, 0);
    if (!ctrl->event)
        return -1;

    local_port = xc_evtchn_bind_interdomain(ctrl->event, domain,
                                            ctrl->event_port);
    if (local_port < 0)
        goto close_evtchn;

    ctrl->event_port = local_port;

    if (xc_evtchn_unmask(ctrl->event, ctrl->event_port))
        goto unbind;

    return 0;

unbind:
    xc_evtchn_unbind(ctrl->event, local_port);
close_evtchn:
    xc_evtchn_close(ctrl->event);
    ctrl->event = NULL;
    return -1;
}
/*
 * OCaml stub: unbind the event channel @port on handle @xce.
 * Raises Failure on error; returns unit on success.
 *
 * Fix: register the value parameters with CAMLparam2 and return via
 * CAMLreturn, as required by the OCaml FFI rules (and as the sibling
 * stub variants in this tree already do) so the GC cannot move the
 * arguments out from under us if caml_failwith allocates.
 */
CAMLprim value stub_eventchn_unbind(value xce, value port)
{
	CAMLparam2(xce, port);

	if (xc_evtchn_unbind(_H(xce), Int_val(port)) == -1)
		caml_failwith("evtchn unbind failed");

	CAMLreturn(Val_unit);
}
int tapdisk_xenblkif_disconnect(const domid_t domid, const int devid) { struct td_xenblkif *blkif; blkif = tapdisk_xenblkif_find(domid, devid); if (!blkif) return -ENODEV; if (tapdisk_xenblkif_reqs_pending(blkif)) { RING_DEBUG(blkif, "disconnect from ring with %d pending requests\n", blkif->ring_size - blkif->n_reqs_free); if (td_flag_test(blkif->vbd->state, TD_VBD_PAUSED)) RING_ERR(blkif, "disconnect from ring with %d pending requests " "and the VBD paused\n", blkif->ring_size - blkif->n_reqs_free); list_move(&blkif->entry, &blkif->vbd->dead_rings); blkif->dead = true; if (blkif->ctx && blkif->port >= 0) { xc_evtchn_unbind(blkif->ctx->xce_handle, blkif->port); blkif->port = -1; } /* * FIXME shall we unmap the ring or will that lead to some fatal error * in tapdisk? IIUC if we don't unmap it we'll get errors during grant * copy. */ return 0; } else return tapdisk_xenblkif_destroy(blkif); }
/*
 * Release a suspend event channel previously bound for @domid.
 *
 * A negative @suspend_evtchn means the channel was never bound, in which
 * case only the suspend-event lock is dropped.  Returns the result of
 * unlock_suspend_event().
 */
int xc_suspend_evtchn_release(xc_interface *xch, xc_evtchn *xce,
                              int domid, int suspend_evtchn)
{
    const int bound = suspend_evtchn >= 0;

    if (bound)
        xc_evtchn_unbind(xce, suspend_evtchn);

    return unlock_suspend_event(xch, domid);
}
/*
 * Unbind the backend device's local event channel port, if any.
 *
 * Deregisters the fd handler first so no callback can fire for the port
 * while it is being torn down; local_port == -1 marks "not bound".
 */
void xen_be_unbind_evtchn(struct XenDevice *xendev)
{
    int port = xendev->local_port;

    if (port == -1) {
        return;
    }

    /* Stop watching the evtchn fd before unbinding the port. */
    qemu_set_fd_handler(xc_evtchn_fd(xendev->evtchndev), NULL, NULL, NULL);
    xc_evtchn_unbind(xendev->evtchndev, port);
    xen_be_printf(xendev, 2, "unbind evtchn port %d\n", port);
    xendev->local_port = -1;
}
/*
 * Tear down a xenfb device's frontend connection: unmap the shared page
 * and unbind the notification event channel.  Safe to call when either
 * resource is already released (page == NULL / port < 0).
 */
static void xenfb_unbind(struct xenfb_device *dev)
{
	void *page = dev->page;
	int port = dev->port;

	if (page) {
		dev->page = NULL;
		munmap(page, XC_PAGE_SIZE);
	}

	if (port >= 0) {
		dev->port = -1;
		xc_evtchn_unbind(dev->xenfb->evt_xch, port);
	}
}
/*
 * OCaml stub: unbind the event channel @port on handle @xce.
 * Logs the failure to stderr and raises Failure with the errno text.
 *
 * Fix: capture errno before calling perror().  POSIX allows any library
 * call to modify errno on failure, so reading errno only after perror()
 * risks raising Failure with the wrong message.
 */
CAMLprim value stub_eventchn_unbind(value xce, value port)
{
	CAMLparam2(xce, port);

	if(xc_evtchn_unbind(_H(xce), Int_val(port)) == -1) {
		int saved_errno = errno;

		perror("xc_evtchn_unbind");
		caml_failwith(strerror(saved_errno));
	}

	CAMLreturn(Val_unit);
}
/*
 * OCaml stub: unbind the event channel @port on handle @xce.
 * Raises Failure on error; evaluates to unit on success.
 */
CAMLprim value stub_eventchn_unbind(value xce, value port)
{
	CAMLparam2(xce, port);

	/* xc_evtchn_unbind() returns -1 on failure. */
	if (xc_evtchn_unbind(_H(xce), Int_val(port)) == -1)
		caml_failwith("evtchn unbind failed");

	CAMLreturn(Val_unit);
}
/*
 * Release the global suspend-channel state in @si: unbind the suspend
 * event channel, then close the evtchn handle.  Each field is reset to
 * -1 once released, so repeated calls are harmless.  Always returns 0.
 */
static int suspend_evtchn_release(void)
{
    if (si.suspend_evtchn >= 0) {
        xc_evtchn_unbind(si.xce, si.suspend_evtchn);
        si.suspend_evtchn = -1;
    }

    if (si.xce >= 0) {
        xc_evtchn_close(si.xce);
        si.xce = -1;
    }

    return 0;
}
/*
 * Fully tear down a block interface and free it.
 *
 * Unregisters the ring-check event, frees the request pool, unbinds the
 * event channel, unmaps the shared ring, removes the interface from its
 * context and global lists, drops the context reference, stops the VBD
 * metrics, destroys the ring stats, and finally frees @blkif.
 *
 * Unmap and stats-cleanup failures are logged and deliberately ignored;
 * only a td_metrics_vbd_stop() error propagates to the caller.
 */
int
tapdisk_xenblkif_destroy(struct td_xenblkif * blkif)
{
    int err;

    ASSERT(blkif);

    /* Cancel the pending ring-check event, if one is registered. */
    if (tapdisk_xenblkif_chkrng_event_id(blkif) >= 0) {
        tapdisk_server_unregister_event(
                tapdisk_xenblkif_chkrng_event_id(blkif));
        blkif->chkrng_event = -1;
    }

    tapdisk_xenblkif_reqs_free(blkif);

    if (blkif->ctx) {
        /* port < 0 means the event channel was never bound (or already
         * unbound by a prior disconnect). */
        if (blkif->port >= 0)
            xc_evtchn_unbind(blkif->ctx->xce_handle, blkif->port);

        if (blkif->rings.common.sring) {
            err = xc_gnttab_munmap(blkif->ctx->xcg_handle,
                    blkif->rings.common.sring, blkif->ring_n_pages);
            if (unlikely(err)) {
                /* Log but ignore: teardown must continue regardless. */
                err = errno;
                EPRINTF("failed to unmap ring page %p (%d pages): %s "
                        "(error ignored)\n", blkif->rings.common.sring,
                        blkif->ring_n_pages, strerror(err));
                err = 0;
            }
        }

        /* Detach from the context and global lists, then drop our
         * reference on the I/O context. */
        list_del(&blkif->entry_ctx);
        list_del(&blkif->entry);
        tapdisk_xenio_ctx_put(blkif->ctx);
    }

    /* Note: a metrics-stop failure is logged but, unlike the others,
     * may be returned to the caller below. */
    err = td_metrics_vbd_stop(&blkif->vbd_stats);
    if (unlikely(err))
        EPRINTF("failed to destroy blkfront stats file: %s\n",
                strerror(-err));

    err = tapdisk_xenblkif_stats_destroy(blkif);
    if (unlikely(err)) {
        EPRINTF("failed to clean up ring stats file: %s (error ignored)\n",
                strerror(-err));
        err = 0;
    }

    free(blkif);

    return err;
}
/*
 * Per-mount worker thread: services the frontend's request ring for one
 * fs_mount until the thread exits.
 *
 * Each iteration completes any finished AIO work, consumes all queued
 * ring requests (dispatching each to the matching fs_op handler), pushes
 * responses, and notifies the frontend via its event channel when needed.
 *
 * NOTE(review): the for(;;) loop has no break, so the teardown code after
 * it (gnttab/evtchn cleanup, pthread_exit) appears unreachable from here
 * — presumably the thread is cancelled externally; confirm with callers.
 */
static void *handle_mount(void *data)
{
    int more, notify;
    struct fs_mount *mount = (struct fs_mount *)data;

    printf("Starting a thread for mount: %d\n", mount->mount_id);
    allocate_request_array(mount);

    for(;;) {
        int nr_consumed=0;
        RING_IDX cons, rp;
        struct fsif_request *req;

        /* Reap completed asynchronous I/O before taking new requests. */
        handle_aio_events(mount);

moretodo:
        rp = mount->ring.sring->req_prod;
        xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

        /* Drain every request published up to the snapshot 'rp'. */
        while ((cons = mount->ring.req_cons) != rp)
        {
            int i;
            struct fs_op *op;

            printf("Got a request at %d (of %d)\n",
                    cons, RING_SIZE(&mount->ring));
            req = RING_GET_REQUEST(&mount->ring, cons);
            printf("Request type=%d\n", req->type);
            /* Linear scan of the NULL-terminated fsops dispatch table. */
            for(i=0;;i++)
            {
                op = fsops[i];
                if(op == NULL)
                {
                    /* We've reached the end of the array, no appropriate
                     * handler found. Warn, ignore and continue. */
                    printf("WARN: Unknown request type: %d\n", req->type);
                    mount->ring.req_cons++;
                    break;
                }
                if(op->type == req->type)
                {
                    /* There needs to be a dispatch handler */
                    assert(op->dispatch_handler != NULL);
                    op->dispatch_handler(mount, req);
                    break;
                }
            }

            nr_consumed++;
        }
        printf("Backend consumed: %d requests\n", nr_consumed);

        /* Re-check for requests that raced in after the snapshot. */
        RING_FINAL_CHECK_FOR_REQUESTS(&mount->ring, more);
        if(more) goto moretodo;

        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&mount->ring, notify);
        printf("Pushed responces and notify=%d\n", notify);
        /* Only kick the frontend if the ring macro says it is needed. */
        if(notify)
            xc_evtchn_notify(mount->evth, mount->local_evtchn);
    }

    /* Teardown: unmap the shared ring, release grant and event-channel
     * handles, and free the frontend path before exiting the thread. */
    printf("Destroying thread for mount: %d\n", mount->mount_id);
    xc_gnttab_munmap(mount->gnth, mount->ring.sring, 1);
    xc_gnttab_close(mount->gnth);
    xc_evtchn_unbind(mount->evth, mount->local_evtchn);
    xc_evtchn_close(mount->evth);
    free(mount->frontend);
    pthread_exit(NULL);
}