static int connect_ring(struct backend_info *be) { struct xenbus_device *dev = be->dev; unsigned long ring_ref; unsigned int evtchn; unsigned int pers_grants; char protocol[64] = ""; int err; DPRINTK("%s", dev->otherend); err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu", &ring_ref, "event-channel", "%u", &evtchn, NULL); if (err) { xenbus_dev_fatal(dev, err, "reading %s/ring-ref and event-channel", dev->otherend); return err; } be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE; err = xenbus_gather(XBT_NIL, dev->otherend, "protocol", "%63s", protocol, NULL); if (err) strcpy(protocol, "unspecified, assuming native"); else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE)) be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE; else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32)) be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32; else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64)) be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64; else { xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol); return -1; } err = xenbus_gather(XBT_NIL, dev->otherend, "feature-persistent", "%u", &pers_grants, NULL); if (err) pers_grants = 0; be->blkif->vbd.feature_gnt_persistent = pers_grants; be->blkif->vbd.overflow_max_grants = 0; pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d (%s) %s\n", ring_ref, evtchn, be->blkif->blk_protocol, protocol, pers_grants ? "persistent grants" : ""); /* Map the shared frame, irq etc. */ err = xen_blkif_map(be->blkif, ring_ref, evtchn); if (err) { xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u", ring_ref, evtchn); return err; } return 0; }
int xenbus_read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node) { int err = xenbus_gather(XBT_NIL, xendev->nodename, id_node, "%i", &xendev->otherend_id, path_node, NULL, &xendev->otherend, NULL); if (err) { xenbus_dev_fatal(xendev, err, "reading other end details from %s", xendev->nodename); return err; } if (strlen(xendev->otherend) == 0 || !xenbus_exists(XBT_NIL, xendev->otherend, "")) { xenbus_dev_fatal(xendev, -ENOENT, "unable to read other end from %s. " "missing or inaccessible.", xendev->nodename); free_otherend_details(xendev); return -ENOENT; } return 0; }
/*
 * Fetch the single ring reference and event channel published by the
 * frontend and hand them to blkif_map() to establish the shared ring.
 */
static int connect_ring(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	unsigned long ring_ref;
	unsigned int evtchn;
	int rc;

	DPRINTK("%s", dev->otherend);

	rc = xenbus_gather(XBT_NULL, dev->otherend,
			   "ring-ref", "%lu", &ring_ref,
			   "event-channel", "%u", &evtchn,
			   NULL);
	if (rc) {
		xenbus_dev_fatal(dev, rc,
				 "reading %s/ring-ref and event-channel",
				 dev->otherend);
		return rc;
	}

	/* Map the shared frame, irq etc. */
	rc = blkif_map(be->blkif, ring_ref, evtchn);
	if (rc) {
		xenbus_dev_fatal(dev, rc, "mapping ring-ref %lu port %u",
				 ring_ref, evtchn);
		return rc;
	}

	return 0;
}
/* backend/<type>/<fe-uuid>/<id> => <type>-<fe-domid>-<id> */ static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { int domid, err; const char *devid, *type, *frontend; unsigned int typelen; type = strchr(nodename, '/'); if (!type) return -EINVAL; type++; typelen = strcspn(type, "/"); if (!typelen || type[typelen] != '/') return -EINVAL; devid = strrchr(nodename, '/') + 1; err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid, "frontend", NULL, &frontend, NULL); if (err) return err; if (strlen(frontend) == 0) err = -ERANGE; if (!err && !xenbus_exists(XBT_NIL, frontend, "")) err = -ENOENT; kfree(frontend); if (err) return err; if (snprintf(bus_id, XEN_BUS_ID_SIZE, "%.*s-%i-%s", typelen, type, domid, devid) >= XEN_BUS_ID_SIZE) return -ENOSPC; return 0; }
int read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node) { /* add for vmdq migrate.When the device is vmdq_vnic ,return */ if(0 == strcmp(xendev->devicetype, VMDQ_VNIC)){ return 0; } int err = xenbus_gather(XBT_NIL, xendev->nodename, id_node, "%i", &xendev->otherend_id, path_node, NULL, &xendev->otherend, NULL); if (err) { xenbus_dev_fatal(xendev, err, "reading other end details from %s", xendev->nodename); return err; } if (strlen(xendev->otherend) == 0 || !xenbus_exists(XBT_NIL, xendev->otherend, "")) { xenbus_dev_fatal(xendev, -ENOENT, "unable to read other end from %s. " "missing or inaccessible.", xendev->nodename); free_otherend_details(xendev); return -ENOENT; } return 0; }
/*
 * Attach to the pcifront frontend: once both ends report Initialised,
 * read the shared-page grant, event channel and protocol magic from the
 * frontend's XenStore area, wire them up via xen_pcibk_do_attach() and
 * move this device to Connected.
 *
 * Serialised by pdev->dev_lock; returns 0 on success or if the state
 * preconditions are not yet met (setup will be retried on a later state
 * change), otherwise a negative errno.
 */
static int xen_pcibk_attach(struct xen_pcibk_device *pdev)
{
	int err = 0;
	int gnt_ref, remote_evtchn;
	char *magic = NULL;

	mutex_lock(&pdev->dev_lock);

	/* Make sure we only do this setup once */
	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
	    XenbusStateInitialised)
		goto out;

	/* Wait for frontend to state that it has published the configuration */
	if (xenbus_read_driver_state(pdev->xdev->otherend) !=
	    XenbusStateInitialised)
		goto out;

	dev_dbg(&pdev->xdev->dev, "Reading frontend config\n");

	err = xenbus_gather(XBT_NIL, pdev->xdev->otherend,
			    "pci-op-ref", "%u", &gnt_ref,
			    "event-channel", "%u", &remote_evtchn,
			    "magic", NULL, &magic, NULL);
	if (err) {
		/* If configuration didn't get read correctly, wait longer */
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error reading configuration from frontend");
		goto out;
	}

	/* The magic string proves the two ends speak the same protocol. */
	if (magic == NULL || strcmp(magic, XEN_PCI_MAGIC) != 0) {
		xenbus_dev_fatal(pdev->xdev, -EFAULT,
				 "version mismatch (%s/%s) with pcifront - "
				 "halting " DRV_NAME,
				 magic, XEN_PCI_MAGIC);
		err = -EFAULT;
		goto out;
	}

	err = xen_pcibk_do_attach(pdev, gnt_ref, remote_evtchn);
	if (err)
		goto out;

	dev_dbg(&pdev->xdev->dev, "Connecting...\n");

	err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
	if (err)
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error switching to connected state!");

	dev_dbg(&pdev->xdev->dev, "Connected? %d\n", err);
out:
	mutex_unlock(&pdev->dev_lock);

	/* magic was allocated by xenbus_gather(); kfree(NULL) is a no-op. */
	kfree(magic);

	return err;
}
/*
 * Connect the optional control ring advertised by the frontend.  The
 * control ring is a protocol extension: if "ctrl-ring-ref" is absent we
 * simply report success, otherwise both the ring reference and its event
 * channel must be present and mappable.
 */
static int connect_ctrl_ring(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xenvif *vif = be->vif;
	grant_ref_t ring_ref;
	unsigned int evtchn;
	unsigned int val;
	int err;

	/* A missing key just means the frontend has no control ring. */
	err = xenbus_gather(XBT_NIL, dev->otherend,
			    "ctrl-ring-ref", "%u", &val, NULL);
	if (err)
		return 0;

	ring_ref = val;

	err = xenbus_gather(XBT_NIL, dev->otherend,
			    "event-channel-ctrl", "%u", &val, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "reading %s/event-channel-ctrl",
				 dev->otherend);
		return err;
	}

	evtchn = val;

	err = xenvif_connect_ctrl(vif, ring_ref, evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "mapping shared-frame %u port %u",
				 ring_ref, evtchn);
		return err;
	}

	return 0;
}
/*
 * Pull the message/net event channels and the two shmem grant references
 * that the accelerated frontend published.  grants[0] receives the ctrl
 * page ref and grants[1] the msg page ref.  Returns the xenbus_gather()
 * result (0 on success).
 */
static int cfg_frontend_info(struct xenbus_device *dev,
			     struct netback_accel *bend, int *grants)
{
	int rc;

	/* Get some info from xenbus on the event channel and shmem grant */
	rc = xenbus_gather(XBT_NIL, dev->otherend,
			   "accel-msg-channel", "%u", &bend->msg_channel,
			   "accel-ctrl-page", "%d", &(grants[0]),
			   "accel-msg-page", "%d", &(grants[1]),
			   "accel-net-channel", "%u", &bend->net_channel,
			   NULL);
	if (rc)
		EPRINTK("failed to read event channels or shmem grant: %d\n",
			rc);
	else
		DPRINTK("got event chan %d and net chan %d from frontend\n",
			bend->msg_channel, bend->net_channel);

	return rc;
}
/*
 * Read the frontend's ring reference and event channel, lazily create the
 * vtpm interface for this frontend/instance, and map the shared ring.
 * Returns 0 on success or a negative errno.
 */
static int connect_ring(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	unsigned long ring_ref;
	unsigned int evtchn;
	int err;

	err = xenbus_gather(XBT_NULL, dev->otherend,
			    "ring-ref", "%lu", &ring_ref,
			    "event-channel", "%u", &evtchn, NULL);
	if (err) {
		xenbus_dev_error(dev, err,
				 "reading %s/ring-ref and event-channel",
				 dev->otherend);
		return err;
	}

	/* Create the interface only on first connect. */
	if (!be->tpmif) {
		be->tpmif = tpmif_find(dev->otherend_id, be->instance);
		if (IS_ERR(be->tpmif)) {
			err = PTR_ERR(be->tpmif);
			be->tpmif = NULL;
			xenbus_dev_fatal(dev,err,"creating vtpm interface");
			return err;
		}
	}

	/*
	 * NOTE(review): IS_ERR() above does not catch a NULL return, so this
	 * check is only redundant if tpmif_find() never returns NULL.  If it
	 * can, we silently skip the mapping and report success here — confirm
	 * tpmif_find()'s contract before simplifying this.
	 */
	if (be->tpmif != NULL) {
		err = tpmif_map(be->tpmif, ring_ref, evtchn);
		if (err) {
			xenbus_dev_error(dev, err,
					 "mapping shared-frame %lu port %u",
					 ring_ref, evtchn);
			return err;
		}
	}

	return 0;
}
static int connect_ring(struct backend_info *be) { struct xenbus_device *dev = be->dev; unsigned long ring_ref; unsigned int evtchn; int err; //get ring_ref, evtchn from xenStore err = xenbus_gather(XBT_NIL, be->dev->otherend, "ring-ref", "%u", &ring_ref, "event-channel", "%u", &evtchn, NULL); if(err){ xenbus_dev_fatal(dev, err, "reading ring_ref, evtchn fail"); } printk("\nxen: dom0: Xenstore read ring_ref,evtchn: %d, %d ",ring_ref, evtchn); printk("\nxen: dom0: Xenstore otherend : %s", be->dev->otherend); err = chrif_map(be->chrif, ring_ref, evtchn); printk("\nxen: dom0: connect ring finished,otherend_id"); return 0; }
/* Read the limits values of the xenbus structure. */ static void cfg_hw_quotas(struct xenbus_device *dev, struct netback_accel *bend) { int err = xenbus_gather (XBT_NIL, dev->nodename, "limits/max-filters", "%d", &bend->quotas.max_filters, "limits/max-buf-pages", "%d", &bend->quotas.max_buf_pages, "limits/max-mcasts", "%d", &bend->quotas.max_mcasts, NULL); if (err) { /* * TODO what if they have previously been set by the * user? This will overwrite with defaults. Maybe * not what we want to do, but useful in startup * case */ DPRINTK("Failed to read quotas from xenbus, using defaults\n"); bend->quotas.max_filters = NETBACK_ACCEL_DEFAULT_MAX_FILTERS; bend->quotas.max_buf_pages = sfc_netback_max_pages; bend->quotas.max_mcasts = NETBACK_ACCEL_DEFAULT_MAX_MCASTS; } return; }
static int read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node) { int err = xenbus_gather(XBT_NULL, xendev->nodename, id_node, "%i", &xendev->otherend_id, path_node, NULL, &xendev->otherend, NULL); if (err) { xenbus_dev_fatal(xendev, err, "reading other end details from %s", xendev->nodename); return (err); } if (strlen(xendev->otherend) == 0 || !xenbus_exists_dir(xendev->otherend, "")) { xenbus_dev_fatal(xendev, X_ENOENT, "missing other end from %s", xendev->nodename); kmem_free((void *)xendev->otherend, strlen(xendev->otherend) + 1); xendev->otherend = NULL; return (X_ENOENT); } return (0); }
/*
 * Read the frontend's ring references, event channel(s) and feature flags
 * from XenStore, then map the shared rings via xenvif_connect().
 *
 * Returns 0 on success, -EOPNOTSUPP when the frontend lacks a mandatory
 * capability (rx-copy, or advertises conflicting GSO flags), or a
 * negative errno from the XenStore reads / ring mapping; failures are
 * also reported through xenbus_dev_fatal().
 */
static int connect_rings(struct backend_info *be)
{
	struct xenvif *vif = be->vif;
	struct xenbus_device *dev = be->dev;
	unsigned long tx_ring_ref, rx_ring_ref;
	unsigned int tx_evtchn, rx_evtchn, rx_copy;
	int err;
	int val;

	err = xenbus_gather(XBT_NIL, dev->otherend,
			    "tx-ring-ref", "%lu", &tx_ring_ref,
			    "rx-ring-ref", "%lu", &rx_ring_ref, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "reading %s/ring-ref",
				 dev->otherend);
		return err;
	}

	/* Try split event channels first, then single event channel. */
	err = xenbus_gather(XBT_NIL, dev->otherend,
			    "event-channel-tx", "%u", &tx_evtchn,
			    "event-channel-rx", "%u", &rx_evtchn, NULL);
	if (err < 0) {
		err = xenbus_scanf(XBT_NIL, dev->otherend,
				   "event-channel", "%u", &tx_evtchn);
		if (err < 0) {
			xenbus_dev_fatal(dev, err,
					 "reading %s/event-channel(-tx/rx)",
					 dev->otherend);
			return err;
		}
		/* Single channel serves both directions. */
		rx_evtchn = tx_evtchn;
	}

	err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
			   &rx_copy);
	if (err == -ENOENT) {
		/* Absent key means an old, non-rx-copy frontend. */
		err = 0;
		rx_copy = 0;
	}
	if (err < 0) {
		xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
				 dev->otherend);
		return err;
	}
	/* This backend only supports rx-copy frontends. */
	if (!rx_copy)
		return -EOPNOTSUPP;

	if (vif->dev->tx_queue_len != 0) {
		if (xenbus_scanf(XBT_NIL, dev->otherend,
				 "feature-rx-notify", "%d", &val) < 0)
			val = 0;
		if (val)
			vif->can_queue = 1;
		else
			/* Must be non-zero for pfifo_fast to work. */
			vif->dev->tx_queue_len = 1;
	}

	/* For each feature key, absence counts as "not supported". */
	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
			 "%d", &val) < 0)
		val = 0;
	vif->can_sg = !!val;

	vif->gso_mask = 0;
	vif->gso_prefix_mask = 0;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
			 "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_mask |= GSO_BIT(TCPV4);

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
			 "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_prefix_mask |= GSO_BIT(TCPV4);

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
			 "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_mask |= GSO_BIT(TCPV6);

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix",
			 "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_prefix_mask |= GSO_BIT(TCPV6);

	/* A protocol may be advertised as normal or prefix GSO, not both. */
	if (vif->gso_mask & vif->gso_prefix_mask) {
		xenbus_dev_fatal(dev, err,
				 "%s: gso and gso prefix flags are not "
				 "mutually exclusive",
				 dev->otherend);
		return -EOPNOTSUPP;
	}

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
			 "%d", &val) < 0)
		val = 0;
	vif->ip_csum = !val;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload",
			 "%d", &val) < 0)
		val = 0;
	vif->ipv6_csum = !!val;

	/* Map the shared frame, irq etc. */
	err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
			     tx_evtchn, rx_evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "mapping shared-frames %lu/%lu port tx %u rx %u",
				 tx_ring_ref, rx_ring_ref,
				 tx_evtchn, rx_evtchn);
		return err;
	}

	return 0;
}
/*
 * Connect one data-ring pair (tx + rx) for the given queue: build the
 * per-queue XenStore path, read the ring references and event
 * channel(s), and map them via xenvif_connect_data().
 *
 * Returns 0 on success or a negative errno (reported via
 * xenbus_dev_fatal()).
 */
static int connect_data_rings(struct backend_info *be,
			      struct xenvif_queue *queue)
{
	struct xenbus_device *dev = be->dev;
	unsigned int num_queues = queue->vif->num_queues;
	unsigned long tx_ring_ref, rx_ring_ref;
	unsigned int tx_evtchn, rx_evtchn;
	int err;
	char *xspath;
	size_t xspathsize;
	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */

	/* If the frontend requested 1 queue, or we have fallen back
	 * to single queue due to lack of frontend support for multi-
	 * queue, expect the remaining XenStore keys in the toplevel
	 * directory. Otherwise, expect them in a subdirectory called
	 * queue-N.
	 */
	if (num_queues == 1) {
		xspath = kzalloc(strlen(dev->otherend) + 1, GFP_KERNEL);
		if (!xspath) {
			xenbus_dev_fatal(dev, -ENOMEM,
					 "reading ring references");
			return -ENOMEM;
		}
		strcpy(xspath, dev->otherend);
	} else {
		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
		xspath = kzalloc(xspathsize, GFP_KERNEL);
		if (!xspath) {
			xenbus_dev_fatal(dev, -ENOMEM,
					 "reading ring references");
			return -ENOMEM;
		}
		snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend,
			 queue->id);
	}

	err = xenbus_gather(XBT_NIL, xspath,
			    "tx-ring-ref", "%lu", &tx_ring_ref,
			    "rx-ring-ref", "%lu", &rx_ring_ref, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "reading %s/ring-ref",
				 xspath);
		goto err;
	}

	/* Try split event channels first, then single event channel. */
	err = xenbus_gather(XBT_NIL, xspath,
			    "event-channel-tx", "%u", &tx_evtchn,
			    "event-channel-rx", "%u", &rx_evtchn, NULL);
	if (err < 0) {
		err = xenbus_scanf(XBT_NIL, xspath,
				   "event-channel", "%u", &tx_evtchn);
		if (err < 0) {
			xenbus_dev_fatal(dev, err,
					 "reading %s/event-channel(-tx/rx)",
					 xspath);
			goto err;
		}
		/* Single channel serves both directions. */
		rx_evtchn = tx_evtchn;
	}

	/* Map the shared frame, irq etc. */
	err = xenvif_connect_data(queue, tx_ring_ref, rx_ring_ref,
				  tx_evtchn, rx_evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "mapping shared-frames %lu/%lu port tx %u rx %u",
				 tx_ring_ref, rx_ring_ref,
				 tx_evtchn, rx_evtchn);
		goto err;
	}

	err = 0;
err: /* Regular return falls through with err == 0 */
	kfree(xspath);
	return err;
}
/*
 * Read the frontend's tx/rx ring references, event channel and feature
 * flags, update the netif feature bits accordingly, and map the shared
 * rings via netif_map().  Returns 0 on success or a negative errno
 * (reported via xenbus_dev_fatal()).
 */
static int connect_rings(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	unsigned long tx_ring_ref, rx_ring_ref;
	unsigned int evtchn, rx_copy;
	int err;
	int val;

	DPRINTK("");

	err = xenbus_gather(XBT_NIL, dev->otherend,
			    "tx-ring-ref", "%lu", &tx_ring_ref,
			    "rx-ring-ref", "%lu", &rx_ring_ref,
			    "event-channel", "%u", &evtchn, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "reading %s/ring-ref and event-channel",
				 dev->otherend);
		return err;
	}

	/* Absent request-rx-copy key means an old, flipping frontend. */
	err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
			   &rx_copy);
	if (err == -ENOENT) {
		err = 0;
		rx_copy = 0;
	}
	if (err < 0) {
		xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
				 dev->otherend);
		return err;
	}
	be->netif->copying_receiver = !!rx_copy;

	if (be->netif->dev->tx_queue_len != 0) {
		if (xenbus_scanf(XBT_NIL, dev->otherend,
				 "feature-rx-notify", "%d", &val) < 0)
			val = 0;
		if (val)
			be->netif->can_queue = 1;
		else
			/* Must be non-zero for pfifo_fast to work. */
			be->netif->dev->tx_queue_len = 1;
	}

	/* For each feature key, absence counts as "not supported". */
	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
			 "%d", &val) < 0)
		val = 0;
	if (val) {
		be->netif->features |= NETIF_F_SG;
		be->netif->dev->features |= NETIF_F_SG;
	}

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
			 "%d", &val) < 0)
		val = 0;
	if (val) {
		be->netif->features |= NETIF_F_TSO;
		be->netif->dev->features |= NETIF_F_TSO;
	}

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
			 "%d", &val) < 0)
		val = 0;
	if (val) {
		be->netif->features &= ~NETIF_F_IP_CSUM;
		be->netif->dev->features &= ~NETIF_F_IP_CSUM;
	}

	/* Tell the stack our feature bits may have changed. */
	netdev_features_change(be->netif->dev);

	/* Map the shared frame, irq etc. */
	err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "mapping shared-frames %lu/%lu port %u",
				 tx_ring_ref, rx_ring_ref, evtchn);
		return err;
	}
	return 0;
}
/*
 * Read the frontend's tx/rx ring references, single event channel and
 * feature flags, record them on the vif, and map the shared rings via
 * xenvif_connect().
 *
 * Returns 0 on success, -EOPNOTSUPP if the frontend is not an rx-copy
 * frontend, or a negative errno otherwise (reported via
 * xenbus_dev_fatal()).
 */
static int connect_rings(struct backend_info *be)
{
	struct xenvif *vif = be->vif;
	struct xenbus_device *dev = be->dev;
	unsigned long tx_ring_ref, rx_ring_ref;
	unsigned int evtchn, rx_copy;
	int err;
	int val;

	err = xenbus_gather(XBT_NIL, dev->otherend,
			    "tx-ring-ref", "%lu", &tx_ring_ref,
			    "rx-ring-ref", "%lu", &rx_ring_ref,
			    "event-channel", "%u", &evtchn, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "reading %s/ring-ref and event-channel",
				 dev->otherend);
		return err;
	}

	/* Absent request-rx-copy key means an old, non-rx-copy frontend. */
	err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
			   &rx_copy);
	if (err == -ENOENT) {
		err = 0;
		rx_copy = 0;
	}
	if (err < 0) {
		xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
				 dev->otherend);
		return err;
	}
	/* This backend only supports rx-copy frontends. */
	if (!rx_copy)
		return -EOPNOTSUPP;

	if (vif->dev->tx_queue_len != 0) {
		if (xenbus_scanf(XBT_NIL, dev->otherend,
				 "feature-rx-notify", "%d", &val) < 0)
			val = 0;
		if (val)
			vif->can_queue = 1;
		else
			/* Must be non-zero for pfifo_fast to work. */
			vif->dev->tx_queue_len = 1;
	}

	/* For each feature key, absence counts as "not supported". */
	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
			 "%d", &val) < 0)
		val = 0;
	vif->can_sg = !!val;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
			 "%d", &val) < 0)
		val = 0;
	vif->gso = !!val;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
			 "%d", &val) < 0)
		val = 0;
	vif->gso_prefix = !!val;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
			 "%d", &val) < 0)
		val = 0;
	vif->csum = !val;

	/* Map the shared frame, irq etc. */
	err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref, evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "mapping shared-frames %lu/%lu port %u",
				 tx_ring_ref, rx_ring_ref, evtchn);
		return err;
	}
	return 0;
}
static int connect_rings(struct backend_info *be) { struct xenvif *vif = be->vif; struct xenbus_device *dev = be->dev; grant_ref_t tx_ring_refs[MAX_PAGES_PER_RING], rx_ring_refs[MAX_PAGES_PER_RING]; unsigned int evtchn, rx_copy, nr_ring_pages; unsigned multipage; int i; int err; int val; char pathbuf[64]; err = xenbus_scanf(XBT_NIL, dev->otherend, "nr-ring-pages", "%u", &nr_ring_pages); if (err == -ENOENT) { err = 0; nr_ring_pages = 1; multipage = 0; } else { multipage = 1; } if (err < 0) { xenbus_dev_fatal(dev, err, "reading %s/nr-ring-pages", dev->otherend); return err; } if (nr_ring_pages < 1 || nr_ring_pages > MAX_PAGES_PER_RING || (!xen_pv_domain() && nr_ring_pages > 1) || (nr_ring_pages & (nr_ring_pages - 1))) { xenbus_dev_fatal(dev, err, "error in %s/nr-ring-pages: should be a power of two between 1 and %d, was %u", dev->otherend, xen_pv_domain() ? MAX_PAGES_PER_RING : 1, nr_ring_pages); return -EINVAL; } vif->nr_ring_pages = nr_ring_pages; err = xenbus_scanf(XBT_NIL, dev->otherend, "event-channel", "%u", &evtchn); if (err < 0) { xenbus_dev_fatal(dev, err, "reading %s/event-channel", dev->otherend); return err; } if (multipage) { for (i = 0; i < nr_ring_pages; i++) { sprintf(pathbuf, "tx-ring-ref-%d", i); err = xenbus_scanf(XBT_NIL, dev->otherend, pathbuf, "%u", &tx_ring_refs[i]); if (err < 0) { xenbus_dev_fatal(dev, err, "reading %s/%s", dev->otherend, pathbuf); return err; } sprintf(pathbuf, "rx-ring-ref-%d", i); err = xenbus_scanf(XBT_NIL, dev->otherend, pathbuf, "%u", &rx_ring_refs[i]); if (err < 0) { xenbus_dev_fatal(dev, err, "reading %s/%s", dev->otherend, pathbuf); return err; } } } else { err = xenbus_gather(XBT_NIL, dev->otherend, "tx-ring-ref", "%lu", &tx_ring_refs[0], "rx-ring-ref", "%lu", &rx_ring_refs[0], NULL); if (err) { xenbus_dev_fatal(dev, err, "reading %s/ring-refs", dev->otherend); return err; } } err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u", &rx_copy); if (err == -ENOENT) { err = 0; rx_copy = 0; } if (err < 0) { 
xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy", dev->otherend); return err; } if (!rx_copy) return -EOPNOTSUPP; if (vif->dev->tx_queue_len != 0) { if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-rx-notify", "%d", &val) < 0) val = 0; if (val) vif->can_queue = 1; else /* Must be non-zero for pfifo_fast to work. */ vif->dev->tx_queue_len = 1; } if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", "%d", &val) < 0) val = 0; vif->can_sg = !!val; if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; vif->gso = !!val; if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix", "%d", &val) < 0) val = 0; vif->gso_prefix = !!val; if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload", "%d", &val) < 0) val = 0; vif->csum = !val; /* Map the shared frame, irq etc. */ err = xenvif_connect(vif, tx_ring_refs, rx_ring_refs, evtchn, nr_ring_pages); if (err) { xenbus_dev_fatal(dev, err, "mapping shared-frames %u/%u port %u", tx_ring_refs[0], rx_ring_refs[0], evtchn); return err; } return 0; }
/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Allocate a page of virtual address space, map @gnt_ref from the other
 * end's grant table into it, and set *@vaddr to that address.
 * Returns 0 on success, and GNTST_* or -ENOMEM on error; on error the
 * failure is also reported via xenbus_dev_fatal().
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
	struct gnttab_map_grant_ref op = {
		.flags = GNTMAP_host_map,
		.ref = gnt_ref,
		.dom = dev->otherend_id,
	};
	struct vm_struct *area;

	*vaddr = NULL;

	area = xen_alloc_vm_area(PAGE_SIZE);
	if (!area)
		return -ENOMEM;

	op.host_addr = (unsigned long)area->addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xen_free_vm_area(area);
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
		return op.status;
	}

	/* Stash the grant handle in the otherwise-unused phys_addr field
	 * so xenbus_unmap_ring_vfree() can recover it later. */
	area->phys_addr = (unsigned long)op.handle;

	*vaddr = area->addr;
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);

/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @handle: pointer to grant handle to be filled
 * @vaddr: address to be mapped to
 *
 * Map @gnt_ref from the other end's grant table to @vaddr; the caller
 * supplies the virtual address space.  Returns 0 on success and GNTST_*
 * on error (also reported via xenbus_dev_fatal()).
 */
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
		    grant_handle_t *handle, void *vaddr)
{
	struct gnttab_map_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.flags = GNTMAP_host_map,
		.ref = gnt_ref,
		.dom = dev->otherend_id,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
	} else
		*handle = op.handle;

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);

/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Unmap a page previously mapped with xenbus_map_ring_valloc() and free
 * its virtual address space.  Returns 0 on success and GNTST_* on error
 * (reported via xenbus_dev_error()).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	struct vm_struct *area;
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
	};

	/* Walk the vmlist to recover the vm_struct for this mapping. */
	read_lock(&vmlist_lock);
	for (area = vmlist; area != NULL; area = area->next) {
		if (area->addr == vaddr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!area) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	/* The handle was stashed in phys_addr by xenbus_map_ring_valloc(). */
	op.handle = (grant_handle_t)area->phys_addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status == GNTST_okay)
		xen_free_vm_area(area);
	else
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 (int16_t)area->phys_addr, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handle: grant handle
 * @vaddr: addr to unmap
 *
 * Unmap a page previously mapped with xenbus_map_ring().  Returns 0 on
 * success and GNTST_* on error (reported via xenbus_dev_error()).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t handle, void *vaddr)
{
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.handle = handle,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay)
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 handle, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);

/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state stored under <path>/state, or XenbusStateUnknown if
 * it cannot be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;

	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
/*
 * Resume the xenbus after a suspend/migration: re-establish store
 * communications, then for each child device re-read its backend
 * details (which may have changed), resume it and re-register its
 * state watch.  Returns 0 on success or an errno from xenbus_gather().
 */
static int
xenbus_resume(device_t dev)
{
	device_t *kids;
	struct xenbus_device_ivars *ivars;
	int i, count, error;
	char *statepath;

	xb_init_comms();
	xs_resume();

	/*
	 * We must re-examine each device and find the new path for
	 * its backend.
	 */
	if (device_get_children(dev, &kids, &count) == 0) {
		for (i = 0; i < count; i++) {
			if (device_get_state(kids[i]) == DS_NOTPRESENT)
				continue;

			ivars = device_get_ivars(kids[i]);

			unregister_xenbus_watch(&ivars->xd_otherend_watch);
			ivars->xd_state = XenbusStateInitialising;

			/*
			 * Find the new backend details and
			 * re-register our watch.
			 */
			free(ivars->xd_otherend_path, M_DEVBUF);
			error = xenbus_gather(XBT_NIL, ivars->xd_node,
			    "backend-id", "%i", &ivars->xd_otherend_id,
			    "backend", NULL, &ivars->xd_otherend_path,
			    NULL);
			if (error) {
				/*
				 * Fix: the old code returned here without
				 * releasing the children array allocated by
				 * device_get_children(), leaking it.
				 */
				free(kids, M_TEMP);
				return (error);
			}

			DEVICE_RESUME(kids[i]);

			statepath = malloc(strlen(ivars->xd_otherend_path)
			    + strlen("/state") + 1, M_DEVBUF, M_WAITOK);
			sprintf(statepath, "%s/state",
			    ivars->xd_otherend_path);

			free(ivars->xd_otherend_watch.node, M_DEVBUF);
			ivars->xd_otherend_watch.node = statepath;
			register_xenbus_watch(&ivars->xd_otherend_watch);

#if 0
			/*
			 * Can't do this yet since we are running in
			 * the xenwatch thread and if we sleep here,
			 * we will stop delivering watch notifications
			 * and the device will never come back online.
			 */
			sx_xlock(&ivars->xd_lock);
			while (ivars->xd_state != XenbusStateClosed
			    && ivars->xd_state != XenbusStateConnected)
				sx_sleep(&ivars->xd_state, &ivars->xd_lock,
				    0, "xdresume", 0);
			sx_xunlock(&ivars->xd_lock);
#endif
		}
		free(kids, M_TEMP);
	}

	return (0);
}
/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Based on Rusty Russell's skeleton driver's map_page.
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
 * page to that address, and sets *vaddr to that address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
	struct gnttab_map_grant_ref op = {
		.flags = GNTMAP_host_map,
		.ref = gnt_ref,
		.dom = dev->otherend_id,
	};
	struct vm_struct *area;

	*vaddr = NULL;

	area = xen_alloc_vm_area(PAGE_SIZE);
	if (!area)
		return -ENOMEM;

	op.host_addr = (unsigned long)area->addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xen_free_vm_area(area);
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
		return op.status;
	}

	/* Stuff the handle in an unused field; xenbus_unmap_ring_vfree()
	 * reads it back from there. */
	area->phys_addr = (unsigned long)op.handle;

	*vaddr = area->addr;
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);

/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @handle: pointer to grant handle to be filled
 * @vaddr: address to be mapped to
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!). It only maps in the page to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
		    grant_handle_t *handle, void *vaddr)
{
	struct gnttab_map_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.flags = GNTMAP_host_map,
		.ref = gnt_ref,
		.dom = dev->otherend_id,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
	} else
		*handle = op.handle;

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);

/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	struct vm_struct *area;
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
	};

	/* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
	 * method so that we don't have to muck with vmalloc internals here.
	 * We could force the user to hang on to their struct vm_struct from
	 * xenbus_map_ring_valloc, but these 6 lines considerably simplify
	 * this API.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area != NULL; area = area->next) {
		if (area->addr == vaddr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!area) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	/* Recover the handle xenbus_map_ring_valloc() stashed in phys_addr. */
	op.handle = (grant_handle_t)area->phys_addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status == GNTST_okay)
		xen_free_vm_area(area);
	else
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 (int16_t)area->phys_addr, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handle: grant handle
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t handle, void *vaddr)
{
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.handle = handle,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay)
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 handle, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);

/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;

	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
/*
 * Begin tracking a XenStore-enumerated device node: allocate per-device
 * ivars, resolve the backend details, set up the backend state watch and
 * add a child device.  Nodes already tracked, or not in the
 * Initialising state, are silently ignored.  Returns 0 or an errno.
 */
static int
xenbus_add_device(device_t dev, const char *bus, const char *type,
    const char *id)
{
	device_t child;
	struct xenbus_device_ivars *ivars;
	enum xenbus_state state;
	char *statepath;
	int error;

	ivars = malloc(sizeof(struct xenbus_device_ivars),
	    M_DEVBUF, M_ZERO|M_WAITOK);
	ivars->xd_node = kasprintf("%s/%s/%s", bus, type, id);

	if (xenbus_device_exists(dev, ivars->xd_node)) {
		/*
		 * We are already tracking this node
		 */
		free(ivars->xd_node, M_DEVBUF);
		free(ivars, M_DEVBUF);
		return (0);
	}

	state = xenbus_read_driver_state(ivars->xd_node);

	if (state != XenbusStateInitialising) {
		/*
		 * Device is not new, so ignore it. This can
		 * happen if a device is going away after
		 * switching to Closed.
		 */
		free(ivars->xd_node, M_DEVBUF);
		free(ivars, M_DEVBUF);
		return (0);
	}

	/*
	 * Find the backend details
	 */
	error = xenbus_gather(XBT_NIL, ivars->xd_node,
	    "backend-id", "%i", &ivars->xd_otherend_id,
	    "backend", NULL, &ivars->xd_otherend_path,
	    NULL);
	if (error) {
		/*
		 * Fix: the old code returned without releasing ivars and
		 * xd_node, leaking both on every failed backend lookup
		 * (the two earlier early-return paths do free them).
		 */
		free(ivars->xd_node, M_DEVBUF);
		free(ivars, M_DEVBUF);
		return (error);
	}

	sx_init(&ivars->xd_lock, "xdlock");
	ivars->xd_type = strdup(type, M_DEVBUF);
	ivars->xd_state = XenbusStateInitialising;

	statepath = malloc(strlen(ivars->xd_otherend_path)
	    + strlen("/state") + 1, M_DEVBUF, M_WAITOK);
	sprintf(statepath, "%s/state", ivars->xd_otherend_path);

	ivars->xd_otherend_watch.node = statepath;
	ivars->xd_otherend_watch.callback = xenbus_backend_changed;

	child = device_add_child(dev, NULL, -1);
	ivars->xd_dev = child;
	device_set_ivars(child, ivars);

	return (0);
}