/**
 * xb_init_comms - Set up interrupt handler off store event channel.
 *
 * Sanity-checks both xenstore rings, then either rebinds the existing
 * irq (resume path) or binds a fresh handler to the store event channel.
 *
 * Returns 0 on success, or the negative value from
 * bind_evtchn_to_irqhandler() on failure.
 */
int xb_init_comms(void)
{
	struct xenstore_domain_interface *intf = xen_store_interface;

	/*
	 * The request ring is ours to produce into; if prod != cons here
	 * something was left in flight.  We only warn — we cannot safely
	 * discard requests the other end may still consume.
	 */
	if (intf->req_prod != intf->req_cons)
		printk(KERN_ERR "XENBUS request ring is not quiescent "
		       "(%08x:%08x)!\n", intf->req_cons, intf->req_prod);

	if (intf->rsp_prod != intf->rsp_cons) {
		printk(KERN_WARNING "XENBUS response ring is not quiescent "
		       "(%08x:%08x): fixing up\n",
		       intf->rsp_cons, intf->rsp_prod);
		/*
		 * breaks kdump: a kdump kernel (reset_devices set) must not
		 * throw away responses the crashed kernel never consumed.
		 */
		if (!reset_devices)
			intf->rsp_cons = intf->rsp_prod;
	}

	if (xenbus_irq) {
		/* Already have an irq; assume we're resuming */
		rebind_evtchn_irq(xen_store_evtchn, xenbus_irq);
	} else {
		int err;

		err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting,
						0, "xenbus", &xb_waitq);
		/*
		 * NOTE(review): `err <= 0` also treats irq 0 as failure;
		 * on x86 irq 0 is never handed out, so this is safe here.
		 */
		if (err <= 0) {
			printk(KERN_ERR "XENBUS request irq failed %i\n", err);
			return err;
		}
		xenbus_irq = err;
	}

	return 0;
}
/*
 * evtchn_bind_to_user - associate @port with user @u and install its
 * interrupt handler.
 *
 * On success the port is registered (enabled) and refcounted; on bind
 * failure the port is closed at the hypervisor and the user association
 * is undone.  Returns 0 on success or a negative error code.
 */
static int evtchn_bind_to_user(struct per_user_data *u, int port)
{
	int rc = 0;

	/*
	 * Ports are never reused, so every caller should pass in a
	 * unique port.
	 *
	 * (Locking not necessary because we haven't registered the
	 * interrupt handler yet, and our caller has already
	 * serialized bind operations.)
	 */
	BUG_ON(get_port_user(port) != NULL);
	set_port_user(port, u);
	set_port_enabled(port, true); /* start enabled */

	/*
	 * NOTE(review): IRQF_DISABLED is deprecated (a no-op, later removed,
	 * in mainline) — confirm target kernel version still defines it.
	 */
	rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
				       u->name, (void *)(unsigned long)port);
	if (rc >= 0)
		rc = evtchn_make_refcounted(port);
	else {
		/* bind failed, should close the port now */
		struct evtchn_close close;
		close.port = port;
		/* A failed close here means our bookkeeping is broken. */
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();
		set_port_user(port, NULL);
	}

	return rc;
}
int gnt_init(void) { int mfn; int err; struct as_sring *sring; struct evtchn_alloc_unbound alloc_unbound; printk(KERN_INFO "gnt_init\n"); page = __get_free_pages(GFP_KERNEL, 0); if (page == 0) { printk(KERN_DEBUG "\nxen:DomU:could not get free page"); return 0; } sring = (struct as_sring *)page; SHARED_RING_INIT(sring); FRONT_RING_INIT(&(info.ring), sring, PAGE_SIZE); mfn = virt_to_mfn(page); printk(KERN_INFO "grant foreign access\n"); info.gref = gnttab_grant_foreign_access(DOM0_ID, mfn, 0); if (info.gref < 0) { printk(KERN_DEBUG "\nxen:could not grant foreign access"); free_page((unsigned long)page); info.ring.sring = NULL; return 0; } printk(KERN_DEBUG "\n gref = %d", info.gref); alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = DOM0_ID; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) { printk(KERN_DEBUG "\nalloc unbound port failure"); return err; } err = bind_evtchn_to_irqhandler(alloc_unbound.port, as_int, 0, "xen-eg", &info); if (err < 0) { printk(KERN_DEBUG "\nbind evtchn to irqhandler failure"); return err; } info.irq = err; info.port = alloc_unbound.port; printk(KERN_DEBUG " interrupt = %d, local_port = %d", info.irq, info.port); printk("...\n..."); create_procfs_entry(); return 0; }
static int xenkbd_connect_backend(struct xenbus_device *dev, struct xenkbd_info *info) { int ret, evtchn; struct xenbus_transaction xbt; ret = xenbus_alloc_evtchn(dev, &evtchn); if (ret) return ret; ret = bind_evtchn_to_irqhandler(evtchn, input_handler, 0, dev->devicetype, info); if (ret < 0) { xenbus_free_evtchn(dev, evtchn); xenbus_dev_fatal(dev, ret, "bind_evtchn_to_irqhandler"); return ret; } info->irq = ret; again: ret = xenbus_transaction_start(&xbt); if (ret) { xenbus_dev_fatal(dev, ret, "starting transaction"); return ret; } ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu", virt_to_mfn(info->page)); if (ret) goto error_xenbus; ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", evtchn); if (ret) goto error_xenbus; ret = xenbus_transaction_end(xbt, 0); if (ret) { if (ret == -EAGAIN) goto again; xenbus_dev_fatal(dev, ret, "completing transaction"); return ret; } xenbus_switch_state(dev, XenbusStateInitialised); return 0; error_xenbus: xenbus_transaction_end(xbt, 1); xenbus_dev_fatal(dev, ret, "writing xenstore"); return ret; }
/*
 * setup_ixpring - allocate, initialise and share the ixp frontend ring,
 * then allocate an event channel and bind the interrupt handler.
 *
 * Returns 0 on success or a negative error code.  On failure all state
 * is torn down through ixp_free() (except the explicit free_page for
 * the not-yet-registered ring).
 */
static int setup_ixpring(struct xenbus_device *dev, struct ixpfront_info *info)
{
	struct ixp_sring *sring;
	int err;

	/* Marked invalid until the grant actually succeeds. */
	info->ring_ref = GRANT_INVALID_REF;

	sring = (struct ixp_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
	if (err < 0) {
		/* Grant failed: the page is ours alone, free it directly. */
		free_page((unsigned long)sring);
		info->ring.sring = NULL;
		goto fail;
	}
	info->ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	/*
	 * NOTE(review): IRQF_SAMPLE_RANDOM was removed from mainline
	 * (v3.6); confirm the target kernel still defines it.  Also,
	 * `err <= 0` rejects irq 0 as well as negative errors.
	 */
	err = bind_evtchn_to_irqhandler(info->evtchn, ixp_interrupt,
					IRQF_SAMPLE_RANDOM, "ixp", info);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err, "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	info->irq = err;

	return 0;

fail:
	/* ixp_free() tears down ring/grant/evtchn state recorded in info. */
	ixp_free(info, 0);
	return err;
}
/* Set up interrupt handler off store event channel. */
int xb_init_comms(void)
{
	int rc;

	/* Drop any stale binding first (e.g. when re-initialising). */
	if (xenbus_irq)
		unbind_from_irqhandler(xenbus_irq, &xb_waitq);

	rc = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting,
				       0, "xenbus", &xb_waitq);
	if (rc > 0) {
		/* Positive return is the irq number we now own. */
		xenbus_irq = rc;
		return 0;
	}

	printk(KERN_ERR "XENBUS request irq failed %i\n", rc);
	return rc;
}
/*
 * evtchn_bind_to_user - associate @port with user @u and install its
 * interrupt handler.
 *
 * Returns 0 on success or the negative error from
 * bind_evtchn_to_irqhandler().  On failure the port_user[] entry is
 * cleared again so the BUG_ON below cannot fire if the caller retries
 * (the original left the stale association in place).
 */
static int evtchn_bind_to_user(struct per_user_data *u, int port)
{
	int rc = 0;

	/*
	 * Ports are never reused, so every caller should pass in a
	 * unique port.
	 *
	 * (Locking not necessary because we haven't registered the
	 * interrupt handler yet, and our caller has already
	 * serialized bind operations.)
	 */
	BUG_ON(port_user[port] != NULL);
	port_user[port] = u;

	/*
	 * NOTE(review): IRQF_DISABLED is deprecated (a no-op, later removed,
	 * in mainline) — confirm target kernel version still defines it.
	 */
	rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
				       u->name, (void *)(unsigned long)port);
	if (rc >= 0)
		rc = 0;		/* discard the irq number; caller wants 0 */
	else
		port_user[port] = NULL;	/* undo registration on failure */

	return rc;
}
/*
 * blkif_map - map the frontend's shared ring page and connect the
 * interdomain event channel for @blkif.
 *
 * Returns 0 on success (blkif->status set to CONNECTED) or a negative
 * error code with all partial state unwound.  (The original never
 * checked the bind_evtchn_to_irqhandler() return value, so a negative
 * error could be stored in blkif->irq while status still went
 * CONNECTED.)
 */
int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
{
	blkif_sring_t *sring;
	int err;
	struct evtchn_bind_interdomain bind_interdomain;

	if ((blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL)
		return -ENOMEM;

	err = map_frontend_page(blkif, shared_page);
	if (err) {
		free_vm_area(blkif->blk_ring_area);
		return err;
	}

	bind_interdomain.remote_dom  = blkif->domid;
	bind_interdomain.remote_port = evtchn;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);
	if (err)
		goto err_unmap;

	blkif->evtchn = bind_interdomain.local_port;

	sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
	BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);

	err = bind_evtchn_to_irqhandler(
		blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);
	if (err < 0) {
		/* Close the local port we just bound before unwinding. */
		struct evtchn_close close;
		close.port = blkif->evtchn;
		HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
		goto err_unmap;
	}
	blkif->irq = err;

	blkif->status = CONNECTED;
	return 0;

err_unmap:
	unmap_frontend_page(blkif);
	free_vm_area(blkif->blk_ring_area);
	return err;
}
/*
 * setup_ring - allocate and share the TPM ring page, connect the event
 * channel, and publish "ring-ref"/"event-channel"/"feature-protocol-v2"
 * to xenstore in one transaction.
 *
 * caller must clean up in case of errors: the early returns below
 * deliberately leave the page/grant/evtchn allocated; teardown is the
 * caller's responsibility.
 *
 * Returns 0 on success or a negative error code.
 */
static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
{
	struct xenbus_transaction xbt;
	const char *message = NULL;
	int rv;
	grant_ref_t gref;

	priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!priv->shr) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}

	rv = xenbus_grant_ring(dev, priv->shr, 1, &gref);
	if (rv < 0)
		return rv;	/* page freed by caller (see header) */

	priv->ring_ref = gref;

	rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
	if (rv)
		return rv;	/* grant+page torn down by caller */

	/* NOTE(review): `rv <= 0` rejects irq 0 as well as errors. */
	rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0,
				       "tpmif", priv);
	if (rv <= 0) {
		xenbus_dev_fatal(dev, rv, "allocating TPM irq");
		return rv;
	}
	priv->irq = rv;

again:
	rv = xenbus_transaction_start(&xbt);
	if (rv) {
		xenbus_dev_fatal(dev, rv, "starting transaction");
		return rv;
	}

	rv = xenbus_printf(xbt, dev->nodename,
			   "ring-ref", "%u", priv->ring_ref);
	if (rv) {
		message = "writing ring-ref";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			   priv->evtchn);
	if (rv) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1");
	if (rv) {
		message = "writing feature-protocol-v2";
		goto abort_transaction;
	}

	rv = xenbus_transaction_end(xbt, 0);
	if (rv == -EAGAIN)
		goto again;	/* xenstore was busy; retry the writes */
	if (rv) {
		xenbus_dev_fatal(dev, rv, "completing transaction");
		return rv;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);
	return 0;

abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_error(dev, rv, "%s", message);
	return rv;
}
static int xenkbd_connect_backend(struct xenbus_device *dev, struct xenkbd_info *info) { int ret, evtchn; struct xenbus_transaction xbt; ret = gnttab_grant_foreign_access(dev->otherend_id, virt_to_gfn(info->page), 0); if (ret < 0) return ret; info->gref = ret; ret = xenbus_alloc_evtchn(dev, &evtchn); if (ret) goto error_grant; ret = bind_evtchn_to_irqhandler(evtchn, input_handler, 0, dev->devicetype, info); if (ret < 0) { xenbus_dev_fatal(dev, ret, "bind_evtchn_to_irqhandler"); goto error_evtchan; } info->irq = ret; again: ret = xenbus_transaction_start(&xbt); if (ret) { xenbus_dev_fatal(dev, ret, "starting transaction"); goto error_irqh; } ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu", virt_to_gfn(info->page)); if (ret) goto error_xenbus; ret = xenbus_printf(xbt, dev->nodename, "page-gref", "%u", info->gref); if (ret) goto error_xenbus; ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", evtchn); if (ret) goto error_xenbus; ret = xenbus_transaction_end(xbt, 0); if (ret) { if (ret == -EAGAIN) goto again; xenbus_dev_fatal(dev, ret, "completing transaction"); goto error_irqh; } xenbus_switch_state(dev, XenbusStateInitialised); return 0; error_xenbus: xenbus_transaction_end(xbt, 1); xenbus_dev_fatal(dev, ret, "writing xenstore"); error_irqh: unbind_from_irqhandler(info->irq, info); info->irq = -1; error_evtchan: xenbus_free_evtchn(dev, evtchn); error_grant: gnttab_end_foreign_access(info->gref, 0, 0UL); info->gref = -1; return ret; }
/*
 * netif_map - map the frontend's tx/rx shared ring pages, bind the
 * interdomain event channel, and bring the interface up.
 *
 * Idempotent: returns 0 immediately if the interface is already
 * connected (netif->irq set).  Returns 0 on success or a negative
 * error code with the vm areas / mappings unwound.
 */
int netif_map(netif_t *netif, unsigned long tx_ring_ref,
	      unsigned long rx_ring_ref, unsigned int evtchn)
{
	int err = -ENOMEM;
	netif_tx_sring_t *txs;
	netif_rx_sring_t *rxs;
	struct evtchn_bind_interdomain bind_interdomain;

	/* Already connected through? */
	if (netif->irq)
		return 0;

	netif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
	if (netif->tx_comms_area == NULL)
		return -ENOMEM;
	netif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
	if (netif->rx_comms_area == NULL)
		goto err_rx;

	err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
	if (err)
		goto err_map;

	bind_interdomain.remote_dom = netif->domid;
	bind_interdomain.remote_port = evtchn;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);
	if (err)
		goto err_hypervisor;

	netif->evtchn = bind_interdomain.local_port;

	/*
	 * NOTE(review): the return value is not checked here — a negative
	 * error would be stored in netif->irq and then passed to
	 * disable_irq().  Confirm whether an error check (as the sibling
	 * blkif code has) should be added.
	 */
	netif->irq = bind_evtchn_to_irqhandler(
		netif->evtchn, netif_be_int, 0, netif->dev->name, netif);
	/* Stay masked until the rings below are initialised. */
	disable_irq(netif->irq);

	txs = (netif_tx_sring_t *)netif->tx_comms_area->addr;
	BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);

	rxs = (netif_rx_sring_t *)
		((char *)netif->rx_comms_area->addr);
	BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);

	netif->rx_req_cons_peek = 0;

	netif_get(netif);
	wmb(); /* Other CPUs see new state before interface is started. */

	rtnl_lock();
	netif->status = CONNECTED;
	wmb();	/* publish CONNECTED before (possibly) starting the device */
	if (netif_running(netif->dev))
		__netif_up(netif);
	rtnl_unlock();

	return 0;

	/* Unwind in reverse order of setup. */
err_hypervisor:
	unmap_frontend_pages(netif);
err_map:
	free_vm_area(netif->rx_comms_area);
err_rx:
	free_vm_area(netif->tx_comms_area);
	return err;
}
/**
 * Connect the OpenXT input device to the corresponding backend.
 *
 * @param dev The device to be connected.
 * @param info The information structure that corresponds to the given device.
 *
 * @return int Zero on success, or an error code on failure.
 */
static int oxtkbd_connect_backend(struct xenbus_device *dev,
				  struct openxt_kbd_info *info)
{
	int ret, evtchn;
	struct xenbus_transaction xbt;

	//To communicate with the backend, we'll share a single page of memory
	//We'll start this process by granting out our "shared page".
	ret = gnttab_grant_foreign_access(dev->otherend_id,
					  virt_to_mfn(info->page), 0);
	if (ret < 0)
		return ret;
	info->gref = ret;

	//Next, we'll need to create an event channel we can use to signal that data
	//has changed in our shared page.
	ret = xenbus_alloc_evtchn(dev, &evtchn);
	if (ret)
		goto error_grant;

	//Bind our input handler to our event channel-- ensuring we receive any
	//"new data" notifications.
	ret = bind_evtchn_to_irqhandler(evtchn, input_handler,
					0, dev->devicetype, info);
	if (ret < 0) {
		xenbus_dev_fatal(dev, ret, "bind_evtchn_to_irqhandler");
		goto error_evtchan;
	}
	info->irq = ret;

 again:
	//Now that we've set up our shared assets, we'll need to communicate them
	//to the backend. First, we'll start a xenbus transaction, so we can dump
	//all of our data into the XenStore simultaneously.
	ret = xenbus_transaction_start(&xbt);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "starting transaction");
		goto error_irqh;
	}

	//Provide a direct reference to the page. This allows backends that want
	//to use foreign mappings (i.e. legacy backends) to map in the shared page
	//without touching grants.
	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
			    virt_to_mfn(info->page));
	if (ret)
		goto error_xenbus;

	//And provide our grant reference. This is the preferred way of getting the
	//shared page.
	ret = xenbus_printf(xbt, dev->nodename, "page-gref", "%u", info->gref);
	if (ret)
		goto error_xenbus;

	//Provide the number for our event channel, so the backend can signal
	//new information to us.
	ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", evtchn);
	if (ret)
		goto error_xenbus;

	//Attempt to apply all of our changes at once.
	ret = xenbus_transaction_end(xbt, 0);

	//If our transaction failed...
	if (ret) {

		//... it may have been because the XenStore was busy. If this is the case,
		//repeat our transaction until we succeed, or hit an error.
		if (ret == -EAGAIN)
			goto again;

		//Otherwise, we couldn't connect. Bail out!
		xenbus_dev_fatal(dev, ret, "completing transaction");
		goto error_irqh;
	}

	//Finally, switch our state to "initialized", hopefully cueing the backend
	//to connect.
	xenbus_switch_state(dev, XenbusStateInitialised);
	return 0;

	//Error unwinding: the labels below fall through, tearing down state in
	//reverse order of its creation.
error_xenbus:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, ret, "writing xenstore");
error_irqh:
	unbind_from_irqhandler(info->irq, info);
	info->irq = -1;
error_evtchan:
	xenbus_free_evtchn(dev, evtchn);
error_grant:
	gnttab_end_foreign_access(info->gref, 0, 0UL);
	info->gref = -1;
	return ret;
}