/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
	free_xenballooned_pages(nr_pages, pages);
}
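/*
 * Hedged usage sketch (not part of the source above): a hypothetical
 * caller pairing gnttab_alloc_pages() with gnttab_free_pages(). Both
 * helpers are part of the Linux grant-table API; the function below,
 * its buffer size, and the "use the pages" step are illustrative
 * assumptions only.
 */
static int example_grant_buffer_cycle(void)
{
	struct page *pages[4];	/* arbitrary example count */
	int rc;

	rc = gnttab_alloc_pages(ARRAY_SIZE(pages), pages);
	if (rc)
		return rc;

	/* ... grant the pages to a remote domain and use them ... */

	/* Tears down any per-page private state before unballooning. */
	gnttab_free_pages(ARRAY_SIZE(pages), pages);
	return 0;
}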
static int __init xlated_setup_gnttab_pages(void)
{
	struct page **pages;
	xen_pfn_t *pfns;
	int rc;
	unsigned int i;
	unsigned long nr_grant_frames = gnttab_max_grant_frames();

	BUG_ON(nr_grant_frames == 0);
	pages = kcalloc(nr_grant_frames, sizeof(pages[0]), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL);
	if (!pfns) {
		kfree(pages);
		return -ENOMEM;
	}
	rc = alloc_xenballooned_pages(nr_grant_frames, pages, 0 /* lowmem */);
	if (rc) {
		pr_warn("%s Couldn't balloon alloc %ld pfns rc:%d\n",
			__func__, nr_grant_frames, rc);
		kfree(pages);
		kfree(pfns);
		return rc;
	}
	for (i = 0; i < nr_grant_frames; i++)
		pfns[i] = page_to_pfn(pages[i]);

	rc = arch_gnttab_map_shared(pfns, nr_grant_frames, nr_grant_frames,
				    &xen_auto_xlat_grant_frames.vaddr);
	if (rc) {
		pr_warn("%s Couldn't map %ld pfns rc:%d\n",
			__func__, nr_grant_frames, rc);
		free_xenballooned_pages(nr_grant_frames, pages);
		kfree(pages);
		kfree(pfns);
		return rc;
	}
	kfree(pages);

	xen_auto_xlat_grant_frames.pfn = pfns;
	xen_auto_xlat_grant_frames.count = nr_grant_frames;

	return 0;
}
/*
 * PVH: we need three things: virtual address, pfns, and mfns. The pfns
 * are allocated via ballooning, then we call arch_gnttab_map_shared to
 * allocate the VA and put the pfns in the PTEs for the VA. The mfns are
 * finally allocated in gnttab_map() by Xen, which also populates the P2M.
 */
static int xlated_setup_gnttab_pages(unsigned long numpages, void **addr)
{
	int i, rc;
	unsigned long pfns[numpages];
	struct page *pages[numpages];

	rc = alloc_xenballooned_pages(numpages, pages, 0);
	if (rc != 0) {
		pr_warn("%s Couldn't balloon alloc %ld pfns rc:%d\n",
			__func__, numpages, rc);
		return rc;
	}
	for (i = 0; i < numpages; i++)
		pfns[i] = page_to_pfn(pages[i]);

	rc = arch_gnttab_map_shared(pfns, numpages, numpages, addr);
	if (rc != 0)
		free_xenballooned_pages(numpages, pages);

	return rc;
}
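/*
 * Hedged sketch of a caller for the PVH helper above: the frame count
 * would typically come from gnttab_max_grant_frames(), as in the __init
 * variant earlier. The function name below and the use of the returned
 * VA are illustrative assumptions, not the kernel's actual call site.
 */
static int example_pvh_gnttab_setup(void)
{
	void *shared;		/* VA that will back the grant frames */
	unsigned long nr = gnttab_max_grant_frames();
	int rc;

	rc = xlated_setup_gnttab_pages(nr, &shared);
	if (rc)
		return rc;

	/* 'shared' can now be handed to the generic grant-table code. */
	return 0;
}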
int omx_xen_deregister_user_segment(omx_xenif_t *omx_xenif, uint32_t id,
				    uint32_t sid, uint8_t eid)
{
	struct gnttab_unmap_grant_ref ops;
	struct backend_info *be = omx_xenif->be;
	struct omxback_dev *dev = be->omxdev;
	struct omx_endpoint *endpoint = dev->endpoints[eid];
	struct omx_xen_user_region *region;
	struct omx_xen_user_region_segment *seg;
	int i, k, ret = 0;
	unsigned int level;

	dprintk_in();
	TIMER_START(&t_dereg_seg);
	/* eid is a uint8_t, so only the upper bound can be violated */
	if (eid >= 255) {
		printk_err
		    ("Wrong endpoint number (%u) check your frontend/backend communication!\n",
		     eid);
		ret = -EINVAL;
		goto out;
	}

	region = rcu_dereference_protected(endpoint->xen_regions[id], 1);
	if (unlikely(!region)) {
		printk_err("%s: Cannot access non-existing region %d\n",
			   __func__, id);
		//ret = -EINVAL;
		goto out;
	}

	seg = &region->segments[sid];

	TIMER_START(&t_release_grants);
	if (!seg->unmap) {
		printk_err("seg->unmap is NULL\n");
		ret = -EINVAL;
		goto out;
	}
	gnttab_unmap_refs(seg->unmap, NULL, seg->pages, seg->nr_pages);
	TIMER_STOP(&t_release_grants);

	TIMER_START(&t_release_gref_list);
	for (k = 0; k < seg->nr_parts; k++) {
#ifdef EXTRA_DEBUG_OMX
		if (!seg->vm_gref) {
			printk(KERN_ERR "vm_gref is NULL\n");
			ret = -EFAULT;
			goto out;
		}
		if (!seg->vm_gref[k]) {
			printk(KERN_ERR "vm_gref[%d] is NULL\n", k);
			ret = -EFAULT;
			goto out;
		}
		if (!seg->vm_gref[k]->addr) {
			printk(KERN_ERR "vm_gref[%d]->addr is NULL\n", k);
			ret = -EFAULT;
			goto out;
		}
		if (!seg->all_handle[k]) {
			printk(KERN_ERR "all_handle[%d] is NULL\n", k);
			ret = -EINVAL;
			goto out;
		}
#endif
		gnttab_set_unmap_op(&ops, (unsigned long)seg->vm_gref[k]->addr,
				    GNTMAP_host_map | GNTMAP_contains_pte,
				    seg->all_handle[k]);
		ops.host_addr =
		    arbitrary_virt_to_machine(lookup_address
					      ((unsigned long)(seg->vm_gref[k]->
							       addr),
					       &level)).maddr;

		dprintk_deb("putting vm_area[%d] %#lx, handle = %#x\n",
			    k, (unsigned long)seg->vm_gref[k],
			    seg->all_handle[k]);
		if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
					      &ops, 1)) {
			printk_err("HYPERVISOR operation failed\n");
			//BUG();
		}
		if (ops.status) {
			printk_err
			    ("HYPERVISOR unmap grant ref[%d]=%#lx failed status = %d\n",
			     k, seg->all_handle[k], ops.status);
			ret = ops.status;
			goto out;
		}
	}
	TIMER_STOP(&t_release_gref_list);

	TIMER_START(&t_free_pages);
	for (k = 0; k < seg->nr_parts; k++)
		if (ops.status == GNTST_okay)
			free_vm_area(seg->vm_gref[k]);

	kfree(seg->map);
	kfree(seg->unmap);
	kfree(seg->gref_list);
#ifdef OMX_XEN_COOKIES
	omx_xen_page_put_cookie(omx_xenif, seg->cookie);
#else
	free_xenballooned_pages(seg->nr_pages, seg->pages);
	kfree(seg->pages);
#endif
	TIMER_STOP(&t_free_pages);

out:
	TIMER_STOP(&t_dereg_seg);
	dprintk_out();
	return ret;
}