/** * gnttab_alloc_pages - alloc pages suitable for grant mapping into * @nr_pages: number of pages to alloc * @pages: returns the pages */ int gnttab_alloc_pages(int nr_pages, struct page **pages) { int i; int ret; ret = alloc_xenballooned_pages(nr_pages, pages, false); if (ret < 0) return ret; for (i = 0; i < nr_pages; i++) { #if BITS_PER_LONG < 64 struct xen_page_foreign *foreign; foreign = kzalloc(sizeof(*foreign), GFP_KERNEL); if (!foreign) { gnttab_free_pages(nr_pages, pages); return -ENOMEM; } set_page_private(pages[i], (unsigned long)foreign); #endif SetPagePrivate(pages[i]); } return 0; }
/*
 * Set up the grant-table frames for an auto-translated guest: balloon
 * out nr_grant_frames pages, record their pfns, and map them into a
 * kernel virtual address range via arch_gnttab_map_shared().
 *
 * On success the pfn array is handed over to xen_auto_xlat_grant_frames
 * (ownership transferred; it must not be freed here), while the
 * temporary pages array is released.  Returns 0 or a negative errno.
 */
static int __init xlated_setup_gnttab_pages(void)
{
	struct page **pages;
	xen_pfn_t *pfns;
	int rc;
	unsigned int i;
	unsigned long nr_grant_frames = gnttab_max_grant_frames();

	BUG_ON(nr_grant_frames == 0);
	pages = kcalloc(nr_grant_frames, sizeof(pages[0]), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL);
	if (!pfns) {
		kfree(pages);
		return -ENOMEM;
	}
	rc = alloc_xenballooned_pages(nr_grant_frames, pages, 0 /* lowmem */);
	if (rc) {
		/* %lu: nr_grant_frames is unsigned long (was %ld) */
		pr_warn("%s Couldn't balloon alloc %lu pfns rc:%d\n", __func__,
			nr_grant_frames, rc);
		kfree(pages);
		kfree(pfns);
		return rc;
	}
	for (i = 0; i < nr_grant_frames; i++)
		pfns[i] = page_to_pfn(pages[i]);

	rc = arch_gnttab_map_shared(pfns, nr_grant_frames, nr_grant_frames,
				    &xen_auto_xlat_grant_frames.vaddr);
	if (rc) {
		/* %lu: nr_grant_frames is unsigned long (was %ld) */
		pr_warn("%s Couldn't map %lu pfns rc:%d\n", __func__,
			nr_grant_frames, rc);
		free_xenballooned_pages(nr_grant_frames, pages);
		kfree(pages);
		kfree(pfns);
		return rc;
	}
	/* The balloon pages stay mapped; only the temporary array goes. */
	kfree(pages);

	xen_auto_xlat_grant_frames.pfn = pfns;
	xen_auto_xlat_grant_frames.count = nr_grant_frames;

	return 0;
}
/* * PVH: we need three things: virtual address, pfns, and mfns. The pfns * are allocated via ballooning, then we call arch_gnttab_map_shared to * allocate the VA and put pfn's in the pte's for the VA. The mfn's are * finally allocated in gnttab_map() by xen which also populates the P2M. */ static int xlated_setup_gnttab_pages(unsigned long numpages, void **addr) { int i, rc; unsigned long pfns[numpages]; struct page *pages[numpages]; rc = alloc_xenballooned_pages(numpages, pages, 0); if (rc != 0) { pr_warn("%s Couldn't balloon alloc %ld pfns rc:%d\n", __func__, numpages, rc); return rc; } for (i = 0; i < numpages; i++) pfns[i] = page_to_pfn(pages[i]); rc = arch_gnttab_map_shared(pfns, numpages, numpages, addr); if (rc != 0) free_xenballooned_pages(numpages, pages); return rc; }
static int omx_xen_accept_gref_list(omx_xenif_t * omx_xenif, struct omx_xen_user_region_segment *seg, uint32_t gref, void **vaddr, uint8_t part) { int ret = 0; struct backend_info *be = omx_xenif->be; struct vm_struct *area; pte_t *pte; struct gnttab_map_grant_ref ops = { .flags = GNTMAP_host_map | GNTMAP_contains_pte, //.flags = GNTMAP_host_map, .ref = gref, .dom = be->remoteDomain, }; dprintk_in(); area = alloc_vm_area(PAGE_SIZE, &pte); if (!area) { ret = -ENOMEM; goto out; } seg->vm_gref[part] = area; ops.host_addr = arbitrary_virt_to_machine(pte).maddr; if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &ops, 1)) { printk_err("HYPERVISOR map grant ref failed"); ret = -ENOSYS; goto out; } dprintk_deb("addr=%#lx, mfn=%#lx, kaddr=%#lx\n", (unsigned long)area->addr, ops.dev_bus_addr >> PAGE_SHIFT, ops.host_addr); if (ops.status) { printk_err("HYPERVISOR map grant ref failed status = %d", ops.status); ret = ops.status; goto out; } dprintk_deb("gref_offset = %#x\n", seg->gref_offset); *vaddr = (area->addr + seg->gref_offset); ret = ops.handle; #if 0 for (i = 0; i < (size + 2); i++) { dprintk_deb("gref_list[%d] = %u\n", i, *(((uint32_t *) * vaddr) + i)); } #endif seg->all_handle[part] = ops.handle; dprintk_deb("vaddr = %p, area->addr=%p, handle[%d]=%d\n", vaddr, area->addr, part, seg->all_handle[part]); out: dprintk_out(); return ret; } int omx_xen_register_user_segment(omx_xenif_t * omx_xenif, struct omx_ring_msg_register_user_segment *req) { struct backend_info *be = omx_xenif->be; void *vaddr = NULL; uint32_t **gref_list; struct page **page_list; struct omxback_dev *omxdev = be->omxdev; struct omx_endpoint *endpoint; struct omx_xen_user_region *region; struct omx_xen_user_region_segment *seg; int ret = 0; int i = 0, k = 0; uint8_t eid, nr_parts; uint16_t first_page_offset, gref_offset; uint32_t sid, id, nr_grefs, nr_pages, length, gref[OMX_XEN_GRANT_PAGES_MAX]; uint64_t domU_vaddr; int idx = 0, sidx = 0; struct gnttab_map_grant_ref *map; struct 
gnttab_unmap_grant_ref *unmap; dprintk_in(); TIMER_START(&t_reg_seg); sid = req->sid; id = req->rid; eid = req->eid; domU_vaddr = req->aligned_vaddr; nr_grefs = req->nr_grefs; nr_pages = req->nr_pages; nr_parts = req->nr_parts; length = req->length; dprintk_deb("nr_parts = %#x\n", nr_parts); for (k = 0; k < nr_parts; k++) { gref[k] = req->gref[k]; dprintk_deb("printing gref = %lu\n", gref[k]); } gref_offset = req->gref_offset; first_page_offset = req->first_page_offset; endpoint = omxdev->endpoints[eid]; region = rcu_dereference_protected(endpoint->xen_regions[id], 1); if (unlikely(!region)) { printk_err(KERN_ERR "Cannot access non-existing region %d\n", id); ret = -EINVAL; goto out; } dprintk_deb("Got region @%#lx id=%u\n", (unsigned long)region, id); seg = ®ion->segments[sid]; if (unlikely(!seg)) { printk(KERN_ERR "Cannot access non-existing segment %d\n", sid); ret = -EINVAL; goto out; } dprintk_deb("Got segment @%#lx id=%u\n", (unsigned long)seg, sid); seg->gref_offset = gref_offset; dprintk_deb ("Offset of actual list of grant references (in the frontend) = %#x\n", gref_offset); for (k = 0; k < nr_parts; k++) { seg->all_gref[k] = gref[k]; dprintk_deb("grant reference for list of grefs = %#x\n", gref[k]); } seg->nr_parts = nr_parts; dprintk_deb("parts of gref list = %#x\n", nr_parts); TIMER_START(&t_alloc_pages); gref_list = kzalloc(sizeof(uint32_t *) * nr_parts, GFP_ATOMIC); if (!gref_list) { ret = -ENOMEM; printk_err("gref list is NULL, ENOMEM!!!\n"); goto out; } map = kzalloc(sizeof(struct gnttab_map_grant_ref) * nr_pages, GFP_ATOMIC); if (!map) { ret = -ENOMEM; printk_err(" map is NULL, ENOMEM!!!\n"); goto out; } unmap = kzalloc(sizeof(struct gnttab_unmap_grant_ref) * nr_pages, GFP_ATOMIC); if (!unmap) { ret = -ENOMEM; printk_err(" unmap is NULL, ENOMEM!!!\n"); goto out; } #ifdef OMX_XEN_COOKIES seg->cookie = omx_xen_page_get_cookie(omx_xenif, nr_pages); if (!seg->cookie) { printk_err("cannot get cookie\n"); goto out; } page_list = seg->cookie->pages; #else 
page_list = kzalloc(sizeof(struct page *) * nr_pages, GFP_ATOMIC); if (!page_list) { ret = -ENOMEM; printk_err(" page list is NULL, ENOMEM!!!\n"); goto out; } ret = alloc_xenballooned_pages(nr_pages, page_list, false /* lowmem */); if (ret) { printk_err("cannot allocate xenballooned_pages\n"); goto out; } #endif TIMER_STOP(&t_alloc_pages); TIMER_START(&t_accept_gref_list); for (k = 0; k < nr_parts; k++) { ret = omx_xen_accept_gref_list(omx_xenif, seg, gref[k], &vaddr, k); if (ret < 0) { printk_err("Cannot accept gref list, = %d\n", ret); goto out; } gref_list[k] = (uint32_t *) vaddr; if (!gref_list) { printk_err("gref_list is NULL!!!, = %p\n", gref_list); ret = -ENOSYS; goto out; } } TIMER_STOP(&t_accept_gref_list); seg->gref_list = gref_list; seg->nr_pages = nr_pages; seg->first_page_offset = first_page_offset; i = 0; idx = 0; sidx = 0; seg->map = map; seg->unmap = unmap; while (i < nr_pages) { void *tmp_vaddr; unsigned long addr = (unsigned long)pfn_to_kaddr(page_to_pfn(page_list[i])); if (sidx % 256 == 0) dprintk_deb("gref_list[%d][%d] = %#x\n", idx, sidx, gref_list[idx][sidx]); gnttab_set_map_op(&map[i], addr, GNTMAP_host_map, gref_list[idx][sidx], be->remoteDomain); gnttab_set_unmap_op(&unmap[i], addr, GNTMAP_host_map, -1 /* handle */ ); i++; if ((unlikely(i % nr_grefs == 0))) { idx++; sidx = 0; } else { sidx++; } //printk(KERN_INFO "idx=%d, i=%d, sidx=%d\n", idx, i, sidx); } TIMER_START(&t_accept_grants); ret = gnttab_map_refs(map, NULL, page_list, nr_pages); if (ret) { printk_err("Error mapping, ret= %d\n", ret); goto out; } TIMER_STOP(&t_accept_grants); for (i = 0; i < nr_pages; i++) { if (map[i].status) { ret = -EINVAL; printk_err("idx %d, status =%d\n", i, map[i].status); goto out; } else { //BUG_ON(map->map_ops[i].handle == -1); unmap[i].handle = map[i].handle; dprintk_deb("map handle=%d\n", map[i].handle); } } seg->pages = page_list; seg->nr_pages = nr_pages; seg->length = length; region->total_length += length; dprintk_deb("total_length = %#lx, 
nrpages=%lu, pages = %#lx\n", region->total_length, seg->nr_pages, (unsigned long)seg->pages); goto all_ok; out: printk_err("error registering, try to debug MORE!!!!\n"); all_ok: TIMER_STOP(&t_reg_seg); dprintk_out(); return ret; } int omx_xen_create_user_region(omx_xenif_t * omx_xenif, uint32_t id, uint64_t vaddr, uint32_t nr_segments, uint32_t nr_pages, uint32_t nr_grefs, uint8_t eid) { struct backend_info *be = omx_xenif->be; struct omxback_dev *omxdev = be->omxdev; struct omx_endpoint *endpoint = omxdev->endpoints[eid]; struct omx_xen_user_region *region; int ret = 0; dprintk_in(); TIMER_START(&t_create_reg); //udelay(1000); /* allocate the relevant region */ region = kzalloc(sizeof(struct omx_xen_user_region) + nr_segments * sizeof(struct omx_xen_user_region_segment), GFP_KERNEL); if (!region) { printk_err ("No memory to allocate the region/segment buffers\n"); ret = -ENOMEM; goto out; } /* init stuff needed :S */ kref_init(®ion->refcount); region->total_length = 0; region->nr_vmalloc_segments = 0; region->total_registered_length = 0; region->id = id; region->nr_segments = nr_segments; region->eid = eid; region->endpoint = endpoint; region->dirty = 0; if (unlikely(rcu_access_pointer(endpoint->xen_regions[id]) != NULL)) { printk(KERN_ERR "Cannot create busy region %d\n", id); ret = -EBUSY; goto out; } rcu_assign_pointer(endpoint->xen_regions[id], region); out: TIMER_STOP(&t_create_reg); dprintk_out(); return ret; } /* Various region/segment handler functions */ void omx_xen_user_region_destroy_segments(struct omx_xen_user_region *region, struct omx_endpoint *endpoint) { int i; dprintk_in(); if (!endpoint) { printk_err("endpoint is null!!\n"); return; } for (i = 0; i < region->nr_segments; i++) omx_xen_deregister_user_segment(endpoint->be->omx_xenif, region->id, i, endpoint->endpoint_index); dprintk_out(); }