/*
 * Unmap the @nr grant pages backing @area and, when every page came
 * unmapped cleanly, release the vm area and the handle array.
 *
 * Returns 0 on success or -EINVAL if any page failed to unmap; in the
 * failure case @area and @handles are intentionally left allocated,
 * since some pages may still carry live foreign mappings.
 */
static int unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area,
			    unsigned int nr, grant_handle_t handles[])
{
	int err = 0;
	unsigned int i;

	for (i = 0; i < nr; ++i) {
		struct gnttab_unmap_grant_ref op;

		gnttab_set_unmap_op(&op,
				    (unsigned long)area->addr + i * PAGE_SIZE,
				    GNTMAP_host_map, handles[i]);
		/* The hypercall itself failing is unrecoverable. */
		if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
					      &op, 1))
			BUG();
		if (op.status != GNTST_okay) {
			xenbus_dev_error(dev, op.status,
					 "unmapping page %u (handle %#x)",
					 i, handles[i]);
			err = -EINVAL;
		}
	}

	if (!err) {
		free_vm_area(area);
		kfree(handles);
	}
	return err;
}
/*
 * Map one page for the ring.
 *
 * Maps the frontend's shared ring page (grant @ring_ref from domain
 * @chrif->domid) into the backend's pre-allocated @chrif->comms_area.
 *
 * On success records the grant reference and handle in @chrif and
 * returns 0.  Returns -EFAULT if the hypercall itself fails, or the
 * grant status code if the hypervisor refuses the mapping; in the
 * latter case any partial mapping is undone before returning.
 */
static int map_frontend_pages(struct xen_chrif *chrif, grant_ref_t ring_ref)
{
	struct gnttab_map_grant_ref op;

	gnttab_set_map_op(&op, (unsigned long)chrif->comms_area->addr,
			  GNTMAP_host_map, ring_ref, chrif->domid);

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) {
		printk(KERN_DEBUG "xen: dom0: HYPERVISOR map grant ref failed\n");
		return -EFAULT;
	}

	if (op.status) {
		/* Mapping refused: undo any partial mapping before bailing. */
		struct gnttab_unmap_grant_ref unop;

		gnttab_set_unmap_op(&unop,
				    (unsigned long)chrif->comms_area->addr,
				    GNTMAP_host_map, op.handle);
		HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unop, 1);
		printk(KERN_DEBUG "xen: dom0: op.status fail\n");
		return op.status;
	}

	/* Record ring_ref and handle for the later unmap. */
	chrif->shmem_ref = ring_ref;
	chrif->shmem_handle = op.handle;

	/* %p instead of casting the pointer to unsigned int: the old
	 * (unsigned int)/%x form truncated addresses on 64-bit. */
	printk(KERN_DEBUG "xen: dom0: map page success, page=%p, handle = %x, status = %x\n",
	       chrif->comms_area->addr, op.handle, op.status);
	printk(KERN_DEBUG "xen: dom0: map frontend pages finished, otherend_id\n");
	return 0;
}
/* Tear down the grant mapping of the frontend's block-ring page. */
static void unmap_frontend_page(struct xen_blkif *blkif)
{
	struct gnttab_unmap_grant_ref op;
	int err;

	gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
			    GNTMAP_host_map, blkif->shmem_handle);
	err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
	BUG_ON(err);	/* hypercall failure is unrecoverable */
}
/* Release the grant mapping that backs the frontend's shared ring. */
static void unmap_frontend_page(struct vscsibk_info *info)
{
	struct gnttab_unmap_grant_ref op;

	gnttab_set_unmap_op(&op, (unsigned long)info->ring_area->addr,
			    GNTMAP_host_map, info->shmem_handle);
	/* A failing hypercall here leaves the area unusable: stop hard. */
	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();
}
/* Unmap both shared ring pages (tx then rx) of @netif. */
static void unmap_frontend_pages(netif_t *netif)
{
	struct gnttab_unmap_grant_ref op;
	int err;

	/* Tx ring first. */
	gnttab_set_unmap_op(&op, (unsigned long)netif->tx_comms_area->addr,
			    GNTMAP_host_map, netif->tx_shmem_handle);
	lock_vm_area(netif->tx_comms_area);
	err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
	unlock_vm_area(netif->tx_comms_area);
	BUG_ON(err);

	/* Then the rx ring, reusing the same op structure. */
	gnttab_set_unmap_op(&op, (unsigned long)netif->rx_comms_area->addr,
			    GNTMAP_host_map, netif->rx_shmem_handle);
	lock_vm_area(netif->rx_comms_area);
	err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
	unlock_vm_area(netif->rx_comms_area);
	BUG_ON(err);
}
/* * unmap one page of ring */ static void unmap_frontend_pages(struct xen_chrif *chrif) { struct gnttab_unmap_grant_ref unop; gnttab_set_unmap_op(&unop, (unsigned long)chrif->comms_area->addr, GNTMAP_host_map, chrif->shmem_handle); if(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unop, 1)) printk(KERN_DEBUG "\nxen:dom0: unmapped shared page failed"); free_vm_area(chrif->comms_area); printk(KERN_DEBUG "\nxen:dom0: unmapped frontend pages finished"); }
/* Undo the grant mapping of the frontend's block ring page. */
static void unmap_frontend_page(blkif_t *blkif)
{
	struct gnttab_unmap_grant_ref op;
	int err;

	gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
			    GNTMAP_host_map, blkif->shmem_handle);

	lock_vm_area(blkif->blk_ring_area);
	err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
	unlock_vm_area(blkif->blk_ring_area);

	BUG_ON(err);	/* hypercall failure is unrecoverable */
}
/* Based on xenbus_backend_client.c:xenbus_unmap_ring() */
/*
 * Unmap a single granted page at @vaddr identified by @handle.
 * @dev_bus_addr, when non-zero, is also torn down.
 *
 * Returns the grant status (GNTST_okay on success); a failure is
 * additionally reported through xenbus_dev_error().
 */
static int net_accel_unmap_grant(struct xenbus_device *dev,
				 grant_handle_t handle, void *vaddr,
				 u64 dev_bus_addr, unsigned flags)
{
	struct gnttab_unmap_grant_ref op;
	int rc;

	gnttab_set_unmap_op(&op, (unsigned long)vaddr, flags, handle);
	if (dev_bus_addr)
		op.dev_bus_addr = dev_bus_addr;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
	BUG_ON(rc);

	if (op.status != GNTST_okay)
		xenbus_dev_error(dev, op.status,
				 "failed unmapping page at handle %d error %d\n",
				 handle, op.status);

	return op.status;
}
int omx_xen_deregister_user_segment(omx_xenif_t * omx_xenif, uint32_t id, uint32_t sid, uint8_t eid) { struct gnttab_unmap_grant_ref ops; struct backend_info *be = omx_xenif->be; struct omxback_dev *dev = be->omxdev; struct omx_endpoint *endpoint = dev->endpoints[eid]; struct omx_xen_user_region *region; struct omx_xen_user_region_segment *seg; int i, k, ret = 0; unsigned int level; dprintk_in(); TIMER_START(&t_dereg_seg); if (eid < 0 && eid >= 255) { printk_err ("Wrong endpoint number (%u) check your frontend/backend communication!\n", eid); ret = -EINVAL; goto out; } region = rcu_dereference_protected(endpoint->xen_regions[id], 1); if (unlikely(!region)) { printk_err( "%s: Cannot access non-existing region %d\n", __func__, id); //ret = -EINVAL; goto out; } seg = ®ion->segments[sid]; TIMER_START(&t_release_grants); if (!seg->unmap) { printk_err("seg->unmap is NULL\n"); ret = -EINVAL; goto out; } gnttab_unmap_refs(seg->unmap, NULL, seg->pages, seg->nr_pages); TIMER_STOP(&t_release_grants); TIMER_START(&t_release_gref_list); for (k = 0; k < seg->nr_parts; k++) { #ifdef EXTRA_DEBUG_OMX if (!seg->vm_gref) { printk(KERN_ERR "vm_gref is NULL\n"); ret = -EFAULT; goto out; } if (!seg->vm_gref[k]) { printk(KERN_ERR "vm_gref[%d] is NULL\n", k); ret = -EFAULT; goto out; } if (!seg->vm_gref[k]->addr) { printk(KERN_ERR "vm_gref[%d]->addr is NULL\n", k); ret = -EFAULT; goto out; } if (!seg->all_handle[k]) { printk(KERN_ERR "all_handle[%d] is NULL\n", k); ret = -EINVAL; goto out; } #endif gnttab_set_unmap_op(&ops, (unsigned long)seg->vm_gref[k]->addr, GNTMAP_host_map | GNTMAP_contains_pte, seg->all_handle[k]); ops.host_addr = arbitrary_virt_to_machine(lookup_address ((unsigned long)(seg->vm_gref[k]-> addr), &level)).maddr; dprintk_deb("putting vm_area[%d] %#lx, handle = %#x \n", k, (unsigned long)seg->vm_gref[k], seg->all_handle[k]); if (HYPERVISOR_grant_table_op (GNTTABOP_unmap_grant_ref, &ops, 1)){ printk_err ("HYPERVISOR operation failed\n"); //BUG(); } if (ops.status) { 
printk_err ("HYPERVISOR unmap grant ref[%d]=%#lx failed status = %d", k, seg->all_handle[k], ops.status); ret = ops.status; goto out; } } TIMER_STOP(&t_release_gref_list); TIMER_START(&t_free_pages); for (k=0;k<seg->nr_parts;k++) if (ops.status == GNTST_okay) free_vm_area(seg->vm_gref[k]); kfree(seg->map); kfree(seg->unmap); kfree(seg->gref_list); #ifdef OMX_XEN_COOKIES omx_xen_page_put_cookie(omx_xenif, seg->cookie); #else free_xenballooned_pages(seg->nr_pages, seg->pages); kfree(seg->pages); #endif TIMER_STOP(&t_free_pages); out: TIMER_STOP(&t_dereg_seg); dprintk_out(); return ret; }
static int omx_xen_accept_gref_list(omx_xenif_t * omx_xenif, struct omx_xen_user_region_segment *seg, uint32_t gref, void **vaddr, uint8_t part) { int ret = 0; struct backend_info *be = omx_xenif->be; struct vm_struct *area; pte_t *pte; struct gnttab_map_grant_ref ops = { .flags = GNTMAP_host_map | GNTMAP_contains_pte, //.flags = GNTMAP_host_map, .ref = gref, .dom = be->remoteDomain, }; dprintk_in(); area = alloc_vm_area(PAGE_SIZE, &pte); if (!area) { ret = -ENOMEM; goto out; } seg->vm_gref[part] = area; ops.host_addr = arbitrary_virt_to_machine(pte).maddr; if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &ops, 1)) { printk_err("HYPERVISOR map grant ref failed"); ret = -ENOSYS; goto out; } dprintk_deb("addr=%#lx, mfn=%#lx, kaddr=%#lx\n", (unsigned long)area->addr, ops.dev_bus_addr >> PAGE_SHIFT, ops.host_addr); if (ops.status) { printk_err("HYPERVISOR map grant ref failed status = %d", ops.status); ret = ops.status; goto out; } dprintk_deb("gref_offset = %#x\n", seg->gref_offset); *vaddr = (area->addr + seg->gref_offset); ret = ops.handle; #if 0 for (i = 0; i < (size + 2); i++) { dprintk_deb("gref_list[%d] = %u\n", i, *(((uint32_t *) * vaddr) + i)); } #endif seg->all_handle[part] = ops.handle; dprintk_deb("vaddr = %p, area->addr=%p, handle[%d]=%d\n", vaddr, area->addr, part, seg->all_handle[part]); out: dprintk_out(); return ret; } int omx_xen_register_user_segment(omx_xenif_t * omx_xenif, struct omx_ring_msg_register_user_segment *req) { struct backend_info *be = omx_xenif->be; void *vaddr = NULL; uint32_t **gref_list; struct page **page_list; struct omxback_dev *omxdev = be->omxdev; struct omx_endpoint *endpoint; struct omx_xen_user_region *region; struct omx_xen_user_region_segment *seg; int ret = 0; int i = 0, k = 0; uint8_t eid, nr_parts; uint16_t first_page_offset, gref_offset; uint32_t sid, id, nr_grefs, nr_pages, length, gref[OMX_XEN_GRANT_PAGES_MAX]; uint64_t domU_vaddr; int idx = 0, sidx = 0; struct gnttab_map_grant_ref *map; struct 
gnttab_unmap_grant_ref *unmap; dprintk_in(); TIMER_START(&t_reg_seg); sid = req->sid; id = req->rid; eid = req->eid; domU_vaddr = req->aligned_vaddr; nr_grefs = req->nr_grefs; nr_pages = req->nr_pages; nr_parts = req->nr_parts; length = req->length; dprintk_deb("nr_parts = %#x\n", nr_parts); for (k = 0; k < nr_parts; k++) { gref[k] = req->gref[k]; dprintk_deb("printing gref = %lu\n", gref[k]); } gref_offset = req->gref_offset; first_page_offset = req->first_page_offset; endpoint = omxdev->endpoints[eid]; region = rcu_dereference_protected(endpoint->xen_regions[id], 1); if (unlikely(!region)) { printk_err(KERN_ERR "Cannot access non-existing region %d\n", id); ret = -EINVAL; goto out; } dprintk_deb("Got region @%#lx id=%u\n", (unsigned long)region, id); seg = ®ion->segments[sid]; if (unlikely(!seg)) { printk(KERN_ERR "Cannot access non-existing segment %d\n", sid); ret = -EINVAL; goto out; } dprintk_deb("Got segment @%#lx id=%u\n", (unsigned long)seg, sid); seg->gref_offset = gref_offset; dprintk_deb ("Offset of actual list of grant references (in the frontend) = %#x\n", gref_offset); for (k = 0; k < nr_parts; k++) { seg->all_gref[k] = gref[k]; dprintk_deb("grant reference for list of grefs = %#x\n", gref[k]); } seg->nr_parts = nr_parts; dprintk_deb("parts of gref list = %#x\n", nr_parts); TIMER_START(&t_alloc_pages); gref_list = kzalloc(sizeof(uint32_t *) * nr_parts, GFP_ATOMIC); if (!gref_list) { ret = -ENOMEM; printk_err("gref list is NULL, ENOMEM!!!\n"); goto out; } map = kzalloc(sizeof(struct gnttab_map_grant_ref) * nr_pages, GFP_ATOMIC); if (!map) { ret = -ENOMEM; printk_err(" map is NULL, ENOMEM!!!\n"); goto out; } unmap = kzalloc(sizeof(struct gnttab_unmap_grant_ref) * nr_pages, GFP_ATOMIC); if (!unmap) { ret = -ENOMEM; printk_err(" unmap is NULL, ENOMEM!!!\n"); goto out; } #ifdef OMX_XEN_COOKIES seg->cookie = omx_xen_page_get_cookie(omx_xenif, nr_pages); if (!seg->cookie) { printk_err("cannot get cookie\n"); goto out; } page_list = seg->cookie->pages; #else 
page_list = kzalloc(sizeof(struct page *) * nr_pages, GFP_ATOMIC); if (!page_list) { ret = -ENOMEM; printk_err(" page list is NULL, ENOMEM!!!\n"); goto out; } ret = alloc_xenballooned_pages(nr_pages, page_list, false /* lowmem */); if (ret) { printk_err("cannot allocate xenballooned_pages\n"); goto out; } #endif TIMER_STOP(&t_alloc_pages); TIMER_START(&t_accept_gref_list); for (k = 0; k < nr_parts; k++) { ret = omx_xen_accept_gref_list(omx_xenif, seg, gref[k], &vaddr, k); if (ret < 0) { printk_err("Cannot accept gref list, = %d\n", ret); goto out; } gref_list[k] = (uint32_t *) vaddr; if (!gref_list) { printk_err("gref_list is NULL!!!, = %p\n", gref_list); ret = -ENOSYS; goto out; } } TIMER_STOP(&t_accept_gref_list); seg->gref_list = gref_list; seg->nr_pages = nr_pages; seg->first_page_offset = first_page_offset; i = 0; idx = 0; sidx = 0; seg->map = map; seg->unmap = unmap; while (i < nr_pages) { void *tmp_vaddr; unsigned long addr = (unsigned long)pfn_to_kaddr(page_to_pfn(page_list[i])); if (sidx % 256 == 0) dprintk_deb("gref_list[%d][%d] = %#x\n", idx, sidx, gref_list[idx][sidx]); gnttab_set_map_op(&map[i], addr, GNTMAP_host_map, gref_list[idx][sidx], be->remoteDomain); gnttab_set_unmap_op(&unmap[i], addr, GNTMAP_host_map, -1 /* handle */ ); i++; if ((unlikely(i % nr_grefs == 0))) { idx++; sidx = 0; } else { sidx++; } //printk(KERN_INFO "idx=%d, i=%d, sidx=%d\n", idx, i, sidx); } TIMER_START(&t_accept_grants); ret = gnttab_map_refs(map, NULL, page_list, nr_pages); if (ret) { printk_err("Error mapping, ret= %d\n", ret); goto out; } TIMER_STOP(&t_accept_grants); for (i = 0; i < nr_pages; i++) { if (map[i].status) { ret = -EINVAL; printk_err("idx %d, status =%d\n", i, map[i].status); goto out; } else { //BUG_ON(map->map_ops[i].handle == -1); unmap[i].handle = map[i].handle; dprintk_deb("map handle=%d\n", map[i].handle); } } seg->pages = page_list; seg->nr_pages = nr_pages; seg->length = length; region->total_length += length; dprintk_deb("total_length = %#lx, 
nrpages=%lu, pages = %#lx\n", region->total_length, seg->nr_pages, (unsigned long)seg->pages); goto all_ok; out: printk_err("error registering, try to debug MORE!!!!\n"); all_ok: TIMER_STOP(&t_reg_seg); dprintk_out(); return ret; } int omx_xen_create_user_region(omx_xenif_t * omx_xenif, uint32_t id, uint64_t vaddr, uint32_t nr_segments, uint32_t nr_pages, uint32_t nr_grefs, uint8_t eid) { struct backend_info *be = omx_xenif->be; struct omxback_dev *omxdev = be->omxdev; struct omx_endpoint *endpoint = omxdev->endpoints[eid]; struct omx_xen_user_region *region; int ret = 0; dprintk_in(); TIMER_START(&t_create_reg); //udelay(1000); /* allocate the relevant region */ region = kzalloc(sizeof(struct omx_xen_user_region) + nr_segments * sizeof(struct omx_xen_user_region_segment), GFP_KERNEL); if (!region) { printk_err ("No memory to allocate the region/segment buffers\n"); ret = -ENOMEM; goto out; } /* init stuff needed :S */ kref_init(®ion->refcount); region->total_length = 0; region->nr_vmalloc_segments = 0; region->total_registered_length = 0; region->id = id; region->nr_segments = nr_segments; region->eid = eid; region->endpoint = endpoint; region->dirty = 0; if (unlikely(rcu_access_pointer(endpoint->xen_regions[id]) != NULL)) { printk(KERN_ERR "Cannot create busy region %d\n", id); ret = -EBUSY; goto out; } rcu_assign_pointer(endpoint->xen_regions[id], region); out: TIMER_STOP(&t_create_reg); dprintk_out(); return ret; } /* Various region/segment handler functions */ void omx_xen_user_region_destroy_segments(struct omx_xen_user_region *region, struct omx_endpoint *endpoint) { int i; dprintk_in(); if (!endpoint) { printk_err("endpoint is null!!\n"); return; } for (i = 0; i < region->nr_segments; i++) omx_xen_deregister_user_segment(endpoint->be->omx_xenif, region->id, i, endpoint->endpoint_index); dprintk_out(); }