/*
 * Bring the block interface up once it is fully provisioned: requires
 * both an irq and a backing device, connects the xenbus backend, and
 * starts the per-interface service thread.  Safe to call repeatedly;
 * it is a no-op until ready and after a successful connect.
 */
static void update_blkif_status(blkif_t *blkif)
{
	struct task_struct *task;

	/* Not ready to connect? */
	if (!blkif->irq || !blkif->vbd.bdev)
		return;

	/* Already connected? */
	if (blkif->be->dev->state == XenbusStateConnected)
		return;

	/* Attempt to connect: exit if we fail to. */
	connect(blkif->be);
	if (blkif->be->dev->state != XenbusStateConnected)
		return;

	task = kthread_run(blkif_schedule, blkif, "xvd %d %02x:%02x",
			   blkif->domid, blkif->be->major, blkif->be->minor);
	blkif->xenblkd = task;
	if (IS_ERR(task)) {
		/* Clear the stale ERR_PTR so nobody treats it as a task. */
		blkif->xenblkd = NULL;
		xenbus_dev_error(blkif->be->dev, PTR_ERR(task),
				 "start xenblkd");
	}
}
/*
 * Connect the vTPM backend if a front-end interface exists and is not
 * yet connected, then announce the new front-end to the vTPM manager.
 */
static void maybe_connect(struct backend_info *be)
{
	int rc;

	if (!be->tpmif || be->tpmif->status == CONNECTED)
		return;

	connect(be);

	/*
	 * Notify the vTPM manager about a new front-end.
	 */
	rc = tpmif_vtpm_open(be->tpmif, be->frontend_id, be->instance);
	if (rc) {
		/*
		 * Should close down this device and notify FE
		 * about closure.
		 */
		xenbus_dev_error(be->dev, rc, "queueing vtpm open packet");
	}
}
/*
 * Unmap @nr consecutively-mapped grant pages starting at area->addr.
 * On full success the vm area and the handle array are released and 0
 * is returned; if any page fails to unmap, both are deliberately kept
 * alive (the mapping is still partially live) and -EINVAL is returned.
 */
static int unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area,
			    unsigned int nr, grant_handle_t handles[])
{
	unsigned int idx;
	unsigned int failures = 0;

	for (idx = 0; idx < nr; ++idx) {
		struct gnttab_unmap_grant_ref unmap;

		gnttab_set_unmap_op(&unmap,
				    (unsigned long)area->addr + idx * PAGE_SIZE,
				    GNTMAP_host_map, handles[idx]);
		if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
					      &unmap, 1))
			BUG();
		if (unmap.status != GNTST_okay) {
			xenbus_dev_error(dev, unmap.status,
					 "unmapping page %u (handle %#x)",
					 idx, handles[idx]);
			failures++;
		}
	}

	if (failures)
		return -EINVAL;

	free_vm_area(area);
	kfree(handles);
	return 0;
}
/** * Callback received when the backend's state changes. */ static void ixpback_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct ixpfront_info *info = dev_get_drvdata(&dev->dev); dev_dbg(&dev->dev, "ixpfront:ixpback_changed to state %d.\n", backend_state); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitWait: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateConnected: ixpfront_connect(info); break; case XenbusStateClosing: if (info->users > 0) xenbus_dev_error(dev, -EBUSY, "Device in use; refusing to close"); else ixpfront_closing(info); break; } }
/*
 * Rescan the PCI roots advertised by the backend after a reconfigure.
 * Only acts when the driver state (as published in XenStore) is
 * XenbusStateReconfiguring; otherwise returns -EFAULT.  Runs entirely
 * under pdev->dev_lock.  Returns 0/positive on success, -errno on
 * failure (errors are also reported through xenbus_dev_error/_fatal).
 */
static int __devinit pcifront_attach_devices(struct pcifront_device *pdev)
{
	int err = -EFAULT;
	int i, num_roots, len;
	unsigned int domain, bus;
	char str[64];

	spin_lock(&pdev->dev_lock);

	/* Only proceed if the frontend is mid-reconfigure. */
	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
	    XenbusStateReconfiguring)
		goto out;

	err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
			   "root_num", "%d", &num_roots);
	if (err == -ENOENT) {
		/* Backend published no roots: fall back to domain 0, bus 0. */
		xenbus_dev_error(pdev->xdev, err,
				 "No PCI Roots found, trying 0000:00");
		err = pcifront_rescan_root(pdev, 0, 0);
		num_roots = 0;
	} else if (err != 1) {
		/* xenbus_scanf returns the number of items parsed (want 1). */
		if (err == 0)
			err = -EINVAL;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error reading number of PCI roots");
		goto out;
	}

	for (i = 0; i < num_roots; i++) {
		len = snprintf(str, sizeof(str), "root-%d", i);
		/* Guard against a truncated XenStore key name. */
		if (unlikely(len >= (sizeof(str) - 1))) {
			err = -ENOMEM;
			goto out;
		}

		/* Each root-%d node holds "domain:bus" in hex. */
		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
				   "%x:%x", &domain, &bus);
		if (err != 2) {
			if (err >= 0)
				err = -EINVAL;
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error reading PCI root %d", i);
			goto out;
		}

		err = pcifront_rescan_root(pdev, domain, bus);
		if (err) {
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error scanning PCI root %04x:%02x",
					 domain, bus);
			goto out;
		}
	}

	xenbus_switch_state(pdev->xdev, XenbusStateConnected);

out:
	spin_unlock(&pdev->dev_lock);
	return err;
}
/*
 * Grant the other end of @dev access to the page at @mfn.  A non-zero
 * @is_iomem marks the grant cache-disabled (GTF_PCD).  Returns the
 * grant reference (>= 0) on success or a negative error, which is also
 * reported via xenbus_dev_error().
 */
int net_accel_grant_page(struct xenbus_device *dev, unsigned long mfn,
			 int is_iomem)
{
	int rc;

	rc = gnttab_grant_foreign_access(dev->otherend_id, mfn,
					 is_iomem ? GTF_PCD : 0);
	if (rc < 0)
		xenbus_dev_error(dev, rc,
				 "failed granting access to page\n");

	return rc;
}
/*
 * Start the per-interface service thread for @chrif.  On failure the
 * stale ERR_PTR is cleared from chrif->xenchrd and the error is
 * reported through xenbus.
 */
static void xen_chrif_thread(struct xen_chrif *chrif)
{
	int err;
	char *name = "chrthread";

	/*
	 * BUG FIX: kthread_run()'s third argument is a printf-style
	 * format string; never pass a variable directly as the format.
	 * Route the name through "%s" so a '%' in it can never be
	 * interpreted as a conversion specifier.
	 */
	chrif->xenchrd = kthread_run(xen_chrif_schedule, chrif, "%s", name);
	if (IS_ERR(chrif->xenchrd)) {
		err = PTR_ERR(chrif->xenchrd);
		chrif->xenchrd = NULL;
		xenbus_dev_error(chrif->be->dev, err, "start xenchrd thread");
	}
}
/*
 * Read the front-end's ring-ref and event-channel from XenStore,
 * create the vtpm interface on first use, and map the shared ring.
 * Returns 0 on success or a negative error (also logged via xenbus).
 */
static int connect_ring(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	unsigned long ring_ref;
	unsigned int evtchn;
	int rc;

	rc = xenbus_gather(XBT_NULL, dev->otherend,
			   "ring-ref", "%lu", &ring_ref,
			   "event-channel", "%u", &evtchn, NULL);
	if (rc) {
		xenbus_dev_error(dev, rc,
				 "reading %s/ring-ref and event-channel",
				 dev->otherend);
		return rc;
	}

	if (be->tpmif == NULL) {
		be->tpmif = tpmif_find(dev->otherend_id, be->instance);
		if (IS_ERR(be->tpmif)) {
			rc = PTR_ERR(be->tpmif);
			be->tpmif = NULL;
			xenbus_dev_fatal(dev, rc, "creating vtpm interface");
			return rc;
		}
	}

	/* tpmif_find() may legitimately yield no interface; nothing to map. */
	if (be->tpmif == NULL)
		return 0;

	rc = tpmif_map(be->tpmif, ring_ref, evtchn);
	if (rc) {
		xenbus_dev_error(dev, rc,
				 "mapping shared-frame %lu port %u",
				 ring_ref, evtchn);
		return rc;
	}

	return 0;
}
/** * Free an existing event channel. Returns 0 on success or -errno on error. */ int xenbus_free_evtchn(struct xenbus_device *dev, int port) { struct evtchn_close close; int err; close.port = port; err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); if (err) xenbus_dev_error(dev, err, "freeing event channel %d", port); return err; }
static void xen_update_blkif_status(struct xen_blkif *blkif) { int err; char name[TASK_COMM_LEN]; /* Not ready to connect? */ if (!blkif->irq || !blkif->vbd.bdev) return; /* Already connected? */ if (blkif->be->dev->state == XenbusStateConnected) return; /* Attempt to connect: exit if we fail to. */ connect(blkif->be); if (blkif->be->dev->state != XenbusStateConnected) return; err = blkback_name(blkif, name); if (err) { xenbus_dev_error(blkif->be->dev, err, "get blkback dev name"); return; } err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping); if (err) { xenbus_dev_error(blkif->be->dev, err, "block flush"); return; } invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping); blkif->xenblkd = kthread_run(xen_blkif_schedule, blkif, "%s", name); if (IS_ERR(blkif->xenblkd)) { err = PTR_ERR(blkif->xenblkd); blkif->xenblkd = NULL; xenbus_dev_error(blkif->be->dev, err, "start xenblkd"); } }
int xenbus_dev_probe(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); const struct xenbus_device_id *id; int err; DPRINTK("%s", dev->nodename); if (!drv->probe) { err = -ENODEV; goto fail; } id = match_device(drv->ids, dev); if (!id) { err = -ENODEV; goto fail; } err = talk_to_otherend(dev); if (err) { dev_warn(&dev->dev, "xenbus_probe: talk_to_otherend on %s failed.\n", dev->nodename); return err; } err = drv->probe(dev, id); if (err) goto fail; err = watch_otherend(dev); if (err) { dev_warn(&dev->dev, "xenbus_probe: watch_otherend on %s failed.\n", dev->nodename); return err; } return 0; fail: xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); xenbus_switch_state(dev, XenbusStateClosed); #if defined(CONFIG_XEN) || defined(MODULE) return -ENODEV; #else return err; #endif }
/*
 * Close an event channel previously allocated for @dev.  Returns 0 on
 * success; on failure the (positive) hypervisor error is negated,
 * logged via xenbus_dev_error(), and returned.
 */
int xenbus_free_evtchn(device_t dev, evtchn_port_t port)
{
	struct evtchn_close close = { .port = port };
	int rc;

	rc = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (rc == 0)
		return (0);

	xenbus_dev_error(dev, -rc, "freeing event channel %d", port);
	return (-rc);
}
static int xenbus_dev_probe(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); const struct xenbus_device_id *id; int err; DPRINTK("%s", dev->nodename); if (!drv->probe) { err = -ENODEV; goto fail; } id = match_device(drv->ids, dev); if (!id) { err = -ENODEV; goto fail; } err = talk_to_otherend(dev); if (err) { printk(KERN_WARNING "xenbus_probe: talk_to_otherend on %s failed.\n", dev->nodename); return err; } err = drv->probe(dev, id); if (err) goto fail; err = watch_otherend(dev); if (err) { printk(KERN_WARNING "xenbus_probe: watch_otherend on %s failed.\n", dev->nodename); return err; } return 0; fail: xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); xenbus_switch_state(dev, XenbusStateClosed); return -ENODEV; }
static int device_probe(struct xenbus_device* dev, const struct xenbus_device_id* id) { struct backendinfo* binfo; binfo = kmalloc(sizeof(*binfo),GFP_KERNEL); if (!binfo) { xenbus_dev_error(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } memset(binfo, 0, sizeof(*binfo)); binfo->dev = dev; printk(KERN_ALERT"\nxen: dom0: Probe fired!"); //get ring_gref, op_gref and port by xenStore xenbus_scanf(XBT_NIL, binfo->dev->otherend, "ring_gref", "%u", &info.ring_gref); xenbus_scanf(XBT_NIL, binfo->dev->otherend, "port", "%u", &info.evtchn); // xenbus_scanf(XBT_NIL, binfo->dev->otherend, "op_gref", "%u", &info.op_gref); printk("\nxen: dom0: Xenstore read port, ring_gref and op_gref success: %d, %d ", info.evtchn, info.ring_gref); info.remoteDomain = binfo->dev->otherend_id; connection_establishment(); // op_page = map_sharedpage(info.op_gref); return 0; }
/* Based on xenbus_backend_client.c:xenbus_unmap_ring() */
/*
 * Unmap a single granted page previously mapped at @vaddr with
 * @handle.  Returns the GNTST_* status from the hypervisor; failures
 * are also reported via xenbus_dev_error().
 */
static int net_accel_unmap_grant(struct xenbus_device *dev,
				 grant_handle_t handle, void *vaddr,
				 u64 dev_bus_addr, unsigned flags)
{
	struct gnttab_unmap_grant_ref unmap_op;

	gnttab_set_unmap_op(&unmap_op, (unsigned long)vaddr, flags, handle);
	/* Also tear down the device (bus-address) mapping, if one exists. */
	if (dev_bus_addr)
		unmap_op.dev_bus_addr = dev_bus_addr;

	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
					 &unmap_op, 1));

	if (unmap_op.status != GNTST_okay)
		xenbus_dev_error(dev, unmap_op.status,
				 "failed unmapping page at handle %d error %d\n",
				 handle, unmap_op.status);

	return unmap_op.status;
}
/* Based on xenbus_backend_client.c:xenbus_map_ring() */
/*
 * Map grant @gnt_ref from the other end of @dev at @vaddr.  On success
 * the grant handle (and optionally the device bus address) are filled
 * in.  Returns the GNTST_* status; failures are logged via xenbus.
 */
static int net_accel_map_grant(struct xenbus_device *dev, int gnt_ref,
			       grant_handle_t *handle, void *vaddr,
			       u64 *dev_bus_addr, unsigned flags)
{
	struct gnttab_map_grant_ref map_op;

	gnttab_set_map_op(&map_op, (unsigned long)vaddr, flags, gnt_ref,
			  dev->otherend_id);

	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &map_op, 1));

	if (map_op.status != GNTST_okay) {
		xenbus_dev_error(dev, map_op.status,
				 "failed mapping in shared page %d from domain %d\n",
				 gnt_ref, dev->otherend_id);
		return map_op.status;
	}

	*handle = map_op.handle;
	if (dev_bus_addr)
		*dev_bus_addr = map_op.dev_bus_addr;

	return map_op.status;
}
/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Based on Rusty Russell's skeleton driver's map_page.
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
 * page to that address, and sets *vaddr to that address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
	struct gnttab_map_grant_ref op = {
		.flags = GNTMAP_host_map,
		.ref   = gnt_ref,
		.dom   = dev->otherend_id,
	};
	struct vm_struct *area;

	*vaddr = NULL;

	area = xen_alloc_vm_area(PAGE_SIZE);
	if (!area)
		return -ENOMEM;

	op.host_addr = (unsigned long)area->addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xen_free_vm_area(area);
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
		return op.status;
	}

	/* Stuff the handle in an unused field (recovered at unmap time). */
	area->phys_addr = (unsigned long)op.handle;

	*vaddr = area->addr;
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);

/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @handle: pointer to grant handle to be filled
 * @vaddr: address to be mapped to
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!). It only maps in the page to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
		    grant_handle_t *handle, void *vaddr)
{
	struct gnttab_map_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.flags     = GNTMAP_host_map,
		.ref       = gnt_ref,
		.dom       = dev->otherend_id,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
	} else
		*handle = op.handle;

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);

/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	struct vm_struct *area;
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
	};

	/* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
	 * method so that we don't have to muck with vmalloc internals here.
	 * We could force the user to hang on to their struct vm_struct from
	 * xenbus_map_ring_valloc, but these 6 lines considerably simplify
	 * this API.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area != NULL; area = area->next) {
		if (area->addr == vaddr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!area) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	/* The grant handle was stashed in phys_addr by map_ring_valloc(). */
	op.handle = (grant_handle_t)area->phys_addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status == GNTST_okay)
		xen_free_vm_area(area);
	else
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 (int16_t)area->phys_addr, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handle: grant handle
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t handle, void *vaddr)
{
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.handle    = handle,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay)
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 handle, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);

/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;

	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
/* caller must clean up in case of errors */
/*
 * Allocate and publish the shared ring for the vTPM front-end: grab a
 * zeroed page, grant it to the backend, allocate an event channel,
 * bind the interrupt handler, then advertise ring-ref, event-channel
 * and feature-protocol-v2 in XenStore inside a (retried) transaction.
 * On success the device is switched to XenbusStateInitialised.
 * Intermediate resources are NOT released on error — per the contract
 * above, the caller performs cleanup.
 */
static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
{
	struct xenbus_transaction xbt;
	const char *message = NULL;
	int rv;
	grant_ref_t gref;

	/* Zeroed page so the backend never observes stale data. */
	priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!priv->shr) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}

	rv = xenbus_grant_ring(dev, priv->shr, 1, &gref);
	if (rv < 0)
		return rv;

	priv->ring_ref = gref;

	rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
	if (rv)
		return rv;

	/* bind_evtchn_to_irqhandler() returns the irq number (> 0). */
	rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0,
				       "tpmif", priv);
	if (rv <= 0) {
		xenbus_dev_fatal(dev, rv, "allocating TPM irq");
		return rv;
	}
	priv->irq = rv;

again:
	rv = xenbus_transaction_start(&xbt);
	if (rv) {
		xenbus_dev_fatal(dev, rv, "starting transaction");
		return rv;
	}

	rv = xenbus_printf(xbt, dev->nodename,
			   "ring-ref", "%u", priv->ring_ref);
	if (rv) {
		message = "writing ring-ref";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			   priv->evtchn);
	if (rv) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1");
	if (rv) {
		message = "writing feature-protocol-v2";
		goto abort_transaction;
	}

	rv = xenbus_transaction_end(xbt, 0);
	/* Transactions can race with other XenStore writers: retry. */
	if (rv == -EAGAIN)
		goto again;
	if (rv) {
		xenbus_dev_fatal(dev, rv, "completing transaction");
		return rv;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_error(dev, rv, "%s", message);

	return rv;
}
static int device_probe(struct xenbus_device* dev, const struct xenbus_device_id* id) { struct backendinfo* binfo; // int i; // char *p; struct vm_struct *v_start; int err; as_sring_t *sring; char *gref,*port; binfo = kmalloc(sizeof(*binfo),GFP_KERNEL); if (!binfo) { xenbus_dev_error(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } memset(binfo,0,sizeof(*binfo)); binfo->dev = dev; printk(KERN_ALERT"\nProbe fired!\n"); gref = xenbus_read(XBT_NIL, binfo->dev->otherend, "gref", NULL); port = xenbus_read(XBT_NIL, binfo->dev->otherend, "port", NULL); info.gref=mycti(gref); info.evtchn=mycti(port); printk("Xenstore read port and gref success: %d, %d \n", info.evtchn, info.gref); info.remoteDomain = binfo->dev->otherend_id; printk(KERN_DEBUG "\nxen: dom0: gnt_init with gref = %d\n", info.gref); v_start = alloc_vm_area(PAGE_SIZE,NULL); if(v_start == 0){ free_vm_area(v_start); printk("\nxen: dom0:could not allocate page"); return -EFAULT; } gnttab_set_map_op(&ops,(unsigned long)v_start->addr,GNTMAP_host_map,info.gref,info.remoteDomain); if(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,&ops,1)){ printk(KERN_DEBUG"\nxen:dom0:HYPERVISOR map grant ref failed"); return -EFAULT; } if (ops.status) { printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed status = %d", ops.status); return -EFAULT; } printk(KERN_DEBUG "\nxen:dom0:shared_page=%x, handle = %x, status = %x", (unsigned int)v_start->addr, ops.handle, ops.status); unmap_ops.host_addr = (unsigned long)(v_start->addr); unmap_ops.handle = ops.handle; ////////////// /* p = (char *)(v_start->addr) + PAGE_SIZE - 1; printk(KERN_DEBUG "\nbytes in page"); for (i = 0;i <= 10; i++, p--) { printk(KERN_DEBUG "%c", *p); } */ //////////////// sring = (as_sring_t *)v_start->addr; BACK_RING_INIT(&info.ring, sring, PAGE_SIZE); err = bind_interdomain_evtchn_to_irqhandler(info.remoteDomain, info.evtchn, as_int, 0, "dom0", &info); if (err < 0) { printk(KERN_DEBUG "\nxen:dom0: gnt_init failed binding to evtchn"); err = 
HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmap_ops, 1); return -EFAULT; } info.irq = err; printk(KERN_DEBUG "\nxen:dom0:end gnt_int: int = %d", info.irq); return 0; }
static int device_probe(struct xenbus_device* dev, const struct xenbus_device_id* id) { int err = 0; int nr_page = 0; struct xenbus_transaction xbt; struct backend_info *be; be = kmalloc(sizeof(*be), GFP_KERNEL); if (!be) { xenbus_dev_error(dev, -ENOMEM, "allocating backend structure"); return -ENOMEM; } be->dev = dev; dev_set_drvdata(&dev->dev, be); printk(KERN_ALERT"\nxen: dom0: Probe fired! otherend_id: %d", dev->otherend_id); be->chrif = xen_chrif_alloc(dev->otherend_id); if (IS_ERR(be->chrif)) { be->chrif = NULL; xenbus_dev_fatal(dev, err, "creating char interface"); err = -1; goto fail; } /** * * write backend info to xenStore **/ /* don't have pdma, in order to test others, so note this */ if(pdma_info_flags == 0){ pdma_info_flags = -1; err = block_info_to_front(); printk(KERN_ALERT"\nxen: dom0: only one time get block info"); } // be->chrif->pdma_info.wt_block_sz = 16384; be->chrif->pdma_info = pdma_info; if(err){ printk(KERN_DEBUG "\nxen: Dom0: fail to write wt_block_sz to xenStore"); }else{ printk(KERN_DEBUG "\nxen: Dom0: wt_block_sz: 0x%lx", be->chrif->pdma_info.wt_block_sz); xenbus_transaction_start(&xbt); xenbus_printf(xbt, dev->nodename, "wt_block_sz", "%u", be->chrif->pdma_info.wt_block_sz); xenbus_printf(xbt, dev->nodename, "rd_block_sz", "%u", be->chrif->pdma_info.rd_block_sz); xenbus_printf(xbt, dev->nodename, "wt_pool_sz", "%lu", be->chrif->pdma_info.wt_pool_sz); xenbus_printf(xbt, dev->nodename, "rd_pool_sz", "%lu", be->chrif->pdma_info.rd_pool_sz); xenbus_transaction_end(xbt, 0); } err = xenbus_switch_state(dev, XenbusStateInitWait); printk("\nxen: dom0: state changed to XenbusStateInitWait"); if(err) goto fail; /* setup back pointer */ be->chrif->be = be; //alloc vm_struct nr_page = be->chrif->pdma_info.wt_block_sz/4096; printk("\nxen: dom0: alloc map_pages nr_page: %d", nr_page); be->chrif->map_pages = kmalloc(sizeof(unsigned long) * nr_page, GFP_KERNEL); memset(be->chrif->map_pages, 0, nr_page * sizeof(unsigned long)); return 0; fail: 
printk("\nxen: dom0: probe failed"); chrback_remove(dev); return err; }
/*
 * Map grant @gnt_ref from @dev's other end into a freshly allocated
 * page of virtual address space; *vaddr receives the address.
 * Returns 0 on success, GNTST_* or -ENOMEM on error (reported via
 * xenbus_dev_fatal).  The grant handle is stashed in area->phys_addr
 * for xenbus_unmap_ring_vfree() to recover.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
	struct gnttab_map_grant_ref op = {
		.flags = GNTMAP_host_map,
		.ref   = gnt_ref,
		.dom   = dev->otherend_id,
	};
	struct vm_struct *area;

	*vaddr = NULL;

	area = xen_alloc_vm_area(PAGE_SIZE);
	if (!area)
		return -ENOMEM;

	op.host_addr = (unsigned long)area->addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xen_free_vm_area(area);
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
		return op.status;
	}

	/* Stuff the handle in an unused field for unmap_ring_vfree(). */
	area->phys_addr = (unsigned long)op.handle;

	*vaddr = area->addr;
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);

/*
 * Map grant @gnt_ref from @dev's other end at the caller-supplied
 * @vaddr (no address space is allocated here).  On success *handle is
 * filled in.  Returns the GNTST_* status; failures are reported via
 * xenbus_dev_fatal.
 */
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
		    grant_handle_t *handle, void *vaddr)
{
	struct gnttab_map_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.flags     = GNTMAP_host_map,
		.ref       = gnt_ref,
		.dom       = dev->otherend_id,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
	} else
		*handle = op.handle;

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);

/*
 * Unmap a page mapped with xenbus_map_ring_valloc() and free its vm
 * area.  The vm_struct is located by walking the vmalloc list; the
 * grant handle is recovered from area->phys_addr where the map routine
 * stashed it.  Returns GNTST_* (GNTST_bad_virt_addr if @vaddr is not a
 * known mapping).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	struct vm_struct *area;
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
	};

	read_lock(&vmlist_lock);
	for (area = vmlist; area != NULL; area = area->next) {
		if (area->addr == vaddr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!area) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	/* Handle was stashed here by xenbus_map_ring_valloc(). */
	op.handle = (grant_handle_t)area->phys_addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status == GNTST_okay)
		xen_free_vm_area(area);
	else
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 (int16_t)area->phys_addr, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

/*
 * Unmap a page mapped with xenbus_map_ring().  The caller owns the
 * virtual address space.  Returns the GNTST_* status; failures are
 * reported via xenbus_dev_error.
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t handle, void *vaddr)
{
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.handle    = handle,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay)
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 handle, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);

/*
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;

	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);