static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct gnttab_setup_table setup; unsigned long *frames; unsigned int nr_gframes = end_idx + 1; int rc; frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC); if (!frames) return -ENOMEM; setup.dom = DOMID_SELF; setup.nr_frames = nr_gframes; set_xen_guest_handle(setup.frame_list, frames); rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1); if (rc == -ENOSYS) { kfree(frames); return -ENOSYS; } BUG_ON(rc || setup.status); rc = arch_gnttab_map_shared(frames, nr_gframes, max_nr_grant_frames(), &shared); BUG_ON(rc); kfree(frames); return 0; }
static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct gnttab_setup_table setup; unsigned long *frames; unsigned int nr_gframes = end_idx + 1; int rc; if (xen_hvm_domain()) { struct xen_add_to_physmap xatp; unsigned int i = end_idx; rc = 0; /* * Loop backwards, so that the first hypercall has the largest * index, ensuring that the table will grow only once. */ do { xatp.domid = DOMID_SELF; xatp.idx = i; xatp.space = XENMAPSPACE_grant_table; xatp.gpfn = (xen_hvm_resume_frames >> PAGE_SHIFT) + i; rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp); if (rc != 0) { #ifdef CONFIG_DEBUG_PRINTK printk(KERN_WARNING "grant table add_to_physmap failed, err=%d\n", rc); #else ; #endif break; } } while (i-- > start_idx); return rc; } frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC); if (!frames) return -ENOMEM; setup.dom = DOMID_SELF; setup.nr_frames = nr_gframes; set_xen_guest_handle(setup.frame_list, frames); rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1); if (rc == -ENOSYS) { kfree(frames); return -ENOSYS; } BUG_ON(rc || setup.status); rc = arch_gnttab_map_shared(frames, nr_gframes, gnttab_max_grant_frames(), &shared); BUG_ON(rc); kfree(frames); return 0; }
/*
 * Grant table v1: map the shared grant frames into the kernel virtual
 * address space (gnttab_shared.addr).  A mapping failure is treated as
 * fatal via BUG_ON, so this always returns 0.
 */
static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int ret = arch_gnttab_map_shared(frames, nr_gframes,
					 gnttab_max_grant_frames(),
					 &gnttab_shared.addr);

	BUG_ON(ret);
	return 0;
}
/*
 * Allocate and map the grant frames for an auto-translated guest.
 *
 * Balloon-allocates gnttab_max_grant_frames() pages, records their pfns,
 * and maps them via arch_gnttab_map_shared() into
 * xen_auto_xlat_grant_frames.vaddr.  On success, ownership of the pfns
 * array transfers to xen_auto_xlat_grant_frames; the temporary pages
 * array is freed (the balloon pages themselves remain allocated).
 *
 * Returns 0 on success or a negative errno on failure, with all
 * temporary allocations released.
 */
static int __init xlated_setup_gnttab_pages(void)
{
	struct page **pages;
	xen_pfn_t *pfns;
	int rc;
	unsigned int i;
	unsigned long nr_grant_frames = gnttab_max_grant_frames();

	BUG_ON(nr_grant_frames == 0);

	pages = kcalloc(nr_grant_frames, sizeof(pages[0]), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL);
	if (!pfns) {
		rc = -ENOMEM;
		goto out_free_pages;
	}

	rc = alloc_xenballooned_pages(nr_grant_frames, pages, 0 /* lowmem */);
	if (rc) {
		/* %lu: nr_grant_frames is unsigned long (was %ld). */
		pr_warn("%s Couldn't balloon alloc %lu pfns rc:%d\n", __func__,
			nr_grant_frames, rc);
		goto out_free_pfns;
	}

	for (i = 0; i < nr_grant_frames; i++)
		pfns[i] = page_to_pfn(pages[i]);

	rc = arch_gnttab_map_shared(pfns, nr_grant_frames, nr_grant_frames,
				    &xen_auto_xlat_grant_frames.vaddr);
	if (rc) {
		pr_warn("%s Couldn't map %lu pfns rc:%d\n", __func__,
			nr_grant_frames, rc);
		free_xenballooned_pages(nr_grant_frames, pages);
		goto out_free_pfns;
	}

	/* pfns is retained below; only the struct-page array is temporary. */
	kfree(pages);

	xen_auto_xlat_grant_frames.pfn = pfns;
	xen_auto_xlat_grant_frames.count = nr_grant_frames;

	return 0;

out_free_pfns:
	kfree(pfns);
out_free_pages:
	kfree(pages);
	return rc;
}
/*
 * Grant table v2: map both the status frames and the shared grant
 * frames into the kernel address space.
 *
 * Fetches the status frame list via GNTTABOP_get_status_frames, maps it
 * into grstatus, then maps the shared frames into gnttab_shared.addr.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENOSYS when
 * the hypervisor does not implement GNTTABOP_get_status_frames.  Any
 * other failure is fatal (BUG_ON).
 */
static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/*
	 * No need for kzalloc as the buffer is fully initialized by the
	 * GNTTABOP_get_status_frames hypercall below.  kmalloc_array()
	 * checks the size multiplication for overflow.
	 */
	sframes = kmalloc_array(nr_sframes, sizeof(*sframes), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom = DOMID_SELF;
	getframes.nr_frames = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	if (rc == -ENOSYS) {
		kfree(sframes);
		return -ENOSYS;
	}

	BUG_ON(rc || getframes.status);

	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	kfree(sframes);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}
/* * PVH: we need three things: virtual address, pfns, and mfns. The pfns * are allocated via ballooning, then we call arch_gnttab_map_shared to * allocate the VA and put pfn's in the pte's for the VA. The mfn's are * finally allocated in gnttab_map() by xen which also populates the P2M. */ static int xlated_setup_gnttab_pages(unsigned long numpages, void **addr) { int i, rc; unsigned long pfns[numpages]; struct page *pages[numpages]; rc = alloc_xenballooned_pages(numpages, pages, 0); if (rc != 0) { pr_warn("%s Couldn't balloon alloc %ld pfns rc:%d\n", __func__, numpages, rc); return rc; } for (i = 0; i < numpages; i++) pfns[i] = page_to_pfn(pages[i]); rc = arch_gnttab_map_shared(pfns, numpages, numpages, addr); if (rc != 0) free_xenballooned_pages(numpages, pages); return rc; }