/*
 * map one page for ring
 *
 * Maps the frontend's shared ring page (grant ring_ref, granted by
 * domain chrif->domid) into chrif->comms_area.  On success the grant
 * reference and map handle are recorded in chrif; if Xen reports a
 * grant-level failure the partial mapping is undone before returning.
 *
 * Returns 0 on success, -EFAULT if the hypercall itself fails, or the
 * non-zero grant status code reported by Xen.
 */
static int map_frontend_pages(struct xen_chrif *chrif, grant_ref_t ring_ref)
{
	struct gnttab_map_grant_ref op;

	gnttab_set_map_op(&op, (unsigned long)chrif->comms_area->addr,
			GNTMAP_host_map, ring_ref, chrif->domid);

	/* A non-zero hypercall return means the batch was not executed. */
	if(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)){
		printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed");
		return -EFAULT;
	}

	if (op.status) {
		/* Grant-level failure: undo the mapping before bailing out. */
		struct gnttab_unmap_grant_ref unop;

		gnttab_set_unmap_op(&unop,
				(unsigned long)chrif->comms_area->addr,
				GNTMAP_host_map, op.handle);
		HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unop, 1);
		printk(KERN_DEBUG "\nxen: dom0: op.status fail");
		return op.status;
	}

	/* record ring_ref and handle */
	chrif->shmem_ref = ring_ref;
	chrif->shmem_handle = op.handle;

	printk(KERN_DEBUG "\nxen: dom0: map page success, page=%x, handle = %x, status = %x",
			(unsigned int)chrif->comms_area->addr, op.handle, op.status);
	printk("\nxen: dom0: map frontend pages finished,otherend_id");
	return 0;
}
/*
 * Attach to a tube shared by peer_domid.
 *
 * Maps the peer's control page (grant page_ref) over tb->page, then
 * maps every per-slot TX/RX buffer grant advertised on that page, and
 * finally binds the peer's TX/RX event channels to the tube handlers.
 *
 * Returns the new tube, or 0 if allocation fails.
 * NOTE(review): grant-map failures are handled with assert() only —
 * confirm that hard-failing here is acceptable.
 */
tube_t *tube_attach(domid_t peer_domid, uint32_t page_ref,
                    uint32_t peer_port_rx, uint32_t peer_port_tx, void *data)
{
    tube_t *tb = alloc_tube(0);
    if (tb == 0)
        return 0;

    tube_shared_t *page = tb->page;

    /* Map the peer's shared control page over tb->page. */
    tb->page_map.ref = page_ref;
    tb->page_map.dom = peer_domid;
    tb->page_map.flags = GNTMAP_host_map;
    tb->page_map.host_addr = (uint64_t)page;
    int rs = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &tb->page_map, 1);
    assert(rs == 0);
    assert(tb->page_map.status == GNTST_okay);

    /* Build map ops for the TX slot buffers (first half of bufs_map)... */
    for (int i = 0; i < TUBE_SLOTS; i++) {
        struct gnttab_map_grant_ref *m = &tb->bufs_map[i];
        m->ref = page->tx.slots[i].gref;
        m->dom = peer_domid;
        m->flags = GNTMAP_host_map;
        m->host_addr = (uint64_t)tb->tx_buffers[i];
    }
    /* ...and for the RX slot buffers (second half). */
    for (int i = 0; i < TUBE_SLOTS; i++) {
        struct gnttab_map_grant_ref *m = &tb->bufs_map[i+TUBE_SLOTS];
        m->ref = page->rx.slots[i].gref;
        m->dom = peer_domid;
        m->flags = GNTMAP_host_map;
        m->host_addr = (uint64_t)tb->rx_buffers[i];
    }

    /* Map all buffers in one batched hypercall, then verify each entry. */
    rs = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, tb->bufs_map, 2*TUBE_SLOTS);
    assert(rs == 0);
    for (int i = 0; i < 2*TUBE_SLOTS; i++) {
        assert(tb->bufs_map[i].status == GNTST_okay);
        rmb(); //dark
    }

    /* Bind the peer's event channels and hook up the tube handlers. */
    tb->evtchn_tx = event_bind_interdomain(peer_domid, peer_port_tx);
    tb->evtchn_rx = event_bind_interdomain(peer_domid, peer_port_rx);
    event_bind(tb->evtchn_rx, tube_int, data);
    event_bind(tb->evtchn_tx, tube_send_int, data);
    return tb;
}
/*
 * Map a batch of grant references and mark the backing pages foreign.
 *
 * @map_ops:  hypervisor map descriptors, one per page
 * @kmap_ops: optional kernel-mapping descriptors (may be NULL)
 * @pages:    pages receiving the mappings
 * @count:    number of entries in each array
 *
 * GNTST_eagain entries are retried; each successfully mapped page is
 * tagged with its granting domain and reference.  Returns the result
 * of set_foreign_p2m_mapping(), or the hypercall error.
 */
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		/* Retry eagain maps */
		if (map_ops[i].status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
						map_ops + i,
						&map_ops[i].status, __func__);

		if (map_ops[i].status == GNTST_okay) {
			struct xen_page_foreign *foreign;

			/* Record which domain/grant backs this page. */
			SetPageForeign(pages[i]);
			foreign = xen_page_foreign(pages[i]);
			foreign->domid = map_ops[i].dom;
			foreign->gref = map_ops[i].ref;
		}
	}

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
/*
 * Unmap @nr granted ring pages from @area and, if every unmap
 * succeeded, release the VM area and the handle array.
 *
 * Each failed unmap is reported via xenbus_dev_error(); in that case
 * the area and handles are deliberately leaked (still in use) and
 * -EINVAL is returned.  Returns 0 on full success.
 */
static int unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area,
			    unsigned int nr, grant_handle_t handles[])
{
	int rc = 0;
	unsigned int idx;

	for (idx = 0; idx < nr; ++idx) {
		struct gnttab_unmap_grant_ref unmap;

		gnttab_set_unmap_op(&unmap,
				    (unsigned long)area->addr + idx * PAGE_SIZE,
				    GNTMAP_host_map, handles[idx]);

		if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
					      &unmap, 1))
			BUG();

		if (unmap.status != GNTST_okay) {
			xenbus_dev_error(dev, unmap.status,
					 "unmapping page %u (handle %#x)",
					 idx, handles[idx]);
			rc = -EINVAL;
		}
	}

	if (rc == 0) {
		free_vm_area(area);
		kfree(handles);
	}

	return rc;
}
/*
 * Initialise the grant-table subsystem.
 *
 * Requests NR_GRANT_PAGES grant-table frames from Xen, seeds the free
 * list with entries NR_RESERVED_ENTRIES..NR_GRANT_ENTRIES-1, and maps
 * the returned machine frames at grant_entries.  Any failure is fatal.
 *
 * NOTE(review): op.status is not checked after GNTTABOP_setup_table —
 * confirm the hypercall return value alone is sufficient here.
 */
void grants_init(void)
{
	unsigned long frames[NR_GRANT_PAGES];
	gnttab_setup_table_t op;

	op.dom = DOMID_SELF;
	op.nr_frames = NR_GRANT_PAGES;
	set_xen_guest_handle(op.frame_list, frames);
	int rs = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &op, 1);
	if (rs < 0)
		fatal_error("grants_init: setup_table failed: %d\n", rs);

	/* Push entries in reverse so the free list pops lowest-numbered
	 * entries first. */
	for (int i = NR_GRANT_ENTRIES-1; i >= NR_RESERVED_ENTRIES; i--) {
		free_list[i] = free_entry;
		free_entry = i;
	}

	grant_entries = mm_alloc_pages(NR_GRANT_PAGES);
	if (grant_entries == 0)
		fatal_error("grants_init: grant entries page allocation failed\n");

	/* Map each grant-table machine frame into our address space.
	 * Low PTE bits 0x7 — presumably present|writable|user; confirm
	 * against the architecture's PTE layout. */
	for (int i = 0; i < NR_GRANT_PAGES; i++) {
		unsigned long ma_grant_table = frames[i] << PAGE_SHIFT;
		rs = HYPERVISOR_update_va_mapping(
			(unsigned long)grant_entries + i*PAGE_SIZE,
			__pte(ma_grant_table | 7), UVMF_INVLPG);
		if (rs < 0)
			fatal_error("grants_init: update mapping failed: %d\n", rs);
	}
}
/*
 * Negotiate the grant-table ABI version with Xen.
 *
 * On a successful GNTTABOP_set_version that reports version 2, select
 * the v2 entry size and operations; otherwise fall back to version 1.
 * If v2 features were already in use and v2 is no longer available
 * (e.g. after migration to an older Xen) we cannot continue.
 *
 * NOTE(review): gsv.version is set to 1 but the code accepts a
 * reported version of 2 — confirm this request/accept asymmetry is
 * intended.
 */
static void gnttab_request_version(void)
{
	int rc;
	struct gnttab_set_version gsv;

	gsv.version = 1;
	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
	if (rc == 0 && gsv.version == 2) {
		grant_table_version = 2;
		grefs_per_grant_frame = PAGE_SIZE / sizeof(union grant_entry_v2);
		gnttab_interface = &gnttab_v2_ops;
	} else if (grant_table_version == 2) {
		/*
		 * If we've already used version 2 features,
		 * but then suddenly discover that they're not
		 * available (e.g. migrating to an older
		 * version of Xen), almost unbounded badness
		 * can happen.
		 */
		panic("we need grant tables version 2, but only version 1 is available");
	} else {
		grant_table_version = 1;
		grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);
		gnttab_interface = &gnttab_v1_ops;
	}
	pr_info("Grant tables using version %d layout\n", grant_table_version);
}
/*
 * Negotiate the grant-table ABI version with Xen.
 *
 * HVM domains request version 1; others request version 2.  If the
 * hypercall succeeds and reports v2, the v2 operations are selected;
 * otherwise we fall back to v1.  If v2 features were already in use
 * and are no longer available, we cannot continue.
 */
static void gnttab_request_version(void)
{
	int rc;
	struct gnttab_set_version gsv;

	if (xen_hvm_domain())
		gsv.version = 1;
	else
		gsv.version = 2;
	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
	if (rc == 0 && gsv.version == 2) {
		grant_table_version = 2;
		gnttab_interface = &gnttab_v2_ops;
	} else if (grant_table_version == 2) {
		/*
		 * If we've already used version 2 features,
		 * but then suddenly discover that they're not
		 * available (e.g. migrating to an older
		 * version of Xen), almost unbounded badness
		 * can happen.
		 */
		panic("we need grant tables version 2, but only version 1 is available");
	} else {
		grant_table_version = 1;
		gnttab_interface = &gnttab_v1_ops;
	}
	printk(KERN_INFO "Grant tables using version %d layout.\n",
	       grant_table_version);
}
/*
 * Request grant-table frames 0..end_idx from Xen and map the shared
 * frames into the kernel via arch_gnttab_map_shared().
 *
 * start_idx is unused here: GNTTABOP_setup_table always returns the
 * full set of nr_gframes frames.  Returns 0 on success, -ENOMEM on
 * allocation failure, or -ENOSYS when the hypercall is unavailable.
 */
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	unsigned long *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom = DOMID_SELF;
	setup.nr_frames = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	/* Any other hypercall failure (or a bad status) is unrecoverable. */
	BUG_ON(rc || setup.status);

	rc = arch_gnttab_map_shared(frames, nr_gframes, max_nr_grant_frames(),
				    &shared);
	BUG_ON(rc);

	kfree(frames);
	return 0;
}
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, struct gnttab_map_grant_ref *kmap_ops, struct page **pages, unsigned int count) { int i, ret; bool lazy = false; ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count); if (ret) return ret; if (xen_feature(XENFEAT_auto_translated_physmap)) return ret; if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { arch_enter_lazy_mmu_mode(); lazy = true; } for (i = 0; i < count; i++) { ret = m2p_remove_override(pages[i], kmap_ops ? &kmap_ops[i] : NULL); if (ret) return ret; } if (lazy) arch_leave_lazy_mmu_mode(); return ret; }
/*
 * Map a batch of grant references and install m2p overrides for the
 * mapped pages.
 *
 * For non-auto-translated guests each successful map gets an m2p
 * override so mfn->pfn lookups resolve to the local page.  Entries
 * whose map failed are skipped.  Returns 0 on success, or the
 * hypercall/override error.
 */
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;
	pte_t *pte;
	unsigned long mfn;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	/* Auto-translated guests need no m2p bookkeeping. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return ret;

	for (i = 0; i < count; i++) {
		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			/* host_addr addresses the PTE itself; read the
			 * mfn out of that PTE. */
			pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
		}
		ret = m2p_add_override(mfn, pages[i],
				       kmap_ops ? &kmap_ops[i] : NULL);
		if (ret)
			return ret;
	}

	return ret;
}
/*
 * Map the grant @entry from domain @friend at @shared_page.
 *
 * On success stores the grant handle in *handle and returns 0; on any
 * failure returns -1 and leaves *handle untouched.
 *
 * BUG FIX: the original passed `&op` to the hypercall although the
 * descriptor is named `map_op` (undeclared identifier), never checked
 * the hypercall's own return value, and assigned the pointer to
 * host_addr without the required integer conversion.
 */
grant_handle_t map(domid_t friend, unsigned int entry, void *shared_page,
                   grant_handle_t *handle)
{
    gnttab_map_grant_ref_t map_op;
    int rc;

    /* Set up the mapping operation */
    map_op.host_addr = (uint64_t)(unsigned long)shared_page;
    map_op.flags = GNTMAP_host_map;
    map_op.ref = entry;
    map_op.dom = friend;

    /* Perform the map; a non-zero return means the op never ran, so
     * map_op.status would be stale. */
    rc = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &map_op, 1);

    /* Check if it worked */
    if (rc != 0 || map_op.status != GNTST_okay)
        return -1;

    /* Return the handle */
    *handle = map_op.handle;
    return 0;
}
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, struct gnttab_unmap_grant_ref *kunmap_ops, struct page **pages, unsigned int count) { int i, ret = 0; if (xen_feature(XENFEAT_auto_translated_physmap)) return 0; for (i = 0; i < count; i++) { unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); unsigned long pfn = page_to_pfn(pages[i]); if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) { ret = -EINVAL; goto out; } set_phys_to_machine(pfn, INVALID_P2M_ENTRY); } if (kunmap_ops) ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, kunmap_ops, count); out: return ret; }
/*
 * Map a single grant reference at a fixed virtual address.
 *
 * @entry:     gntmap slot that records the mapping on success
 * @host_addr: virtual address at which to map the granted page
 * @domid:     granting domain
 * @ref:       grant reference within that domain
 * @writable:  non-zero for a writable mapping, else read-only
 *
 * Returns 0 on success; otherwise the hypercall error, or the
 * (negative) grant status code when the hypercall itself succeeded.
 */
static int
_gntmap_map_grant_ref(struct gntmap_entry *entry,
                      unsigned long host_addr,
                      uint32_t domid,
                      uint32_t ref,
                      int writable)
{
    struct gnttab_map_grant_ref op;
    int rc;

    op.ref = (grant_ref_t) ref;
    op.dom = (domid_t) domid;
    op.host_addr = (uint64_t) host_addr;
    op.flags = GNTMAP_host_map;
    if (!writable)
        op.flags |= GNTMAP_readonly;

    rc = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
    if (rc != 0 || op.status != GNTST_okay) {
        printk("GNTTABOP_map_grant_ref failed: "
               "returned %d, status %" PRId16 "\n",
               rc, op.status);
        return rc != 0 ? rc : op.status;
    }

    /* Record address and handle so the mapping can be undone later. */
    entry->host_addr = host_addr;
    entry->handle = op.handle;
    return 0;
}
static int map_frontend_page( struct vscsibk_info *info, unsigned long ring_ref) { struct gnttab_map_grant_ref op; int err; gnttab_set_map_op(&op, (unsigned long)info->ring_area->addr, GNTMAP_host_map, ring_ref, info->domid); do { err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1); BUG_ON(err); msleep(10); } while(op.status == GNTST_eagain); if (op.status) { printk(KERN_ERR "scsiback: Grant table operation failure !\n"); return op.status; } info->shmem_ref = ring_ref; info->shmem_handle = op.handle; return (GNTST_okay); }
/*
 * Map grant-table frames 0..end_idx.
 *
 * HVM guests populate the frames via XENMEM_add_to_physmap, walking
 * backwards from end_idx so the table grows only once; PV guests use
 * GNTTABOP_setup_table and arch_gnttab_map_shared().
 *
 * Returns 0 on success or a negative error code.
 */
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	unsigned long *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_hvm_domain()) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;
		rc = 0;
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = (xen_hvm_resume_frames >> PAGE_SHIFT) + i;
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
#ifdef CONFIG_DEBUG_PRINTK
				printk(KERN_WARNING
					"grant table add_to_physmap failed, err=%d\n", rc);
#else
				;
#endif
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom = DOMID_SELF;
	setup.nr_frames = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	/* Any other failure (or a bad status) is unrecoverable. */
	BUG_ON(rc || setup.status);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(), &shared);
	BUG_ON(rc);

	kfree(frames);
	return 0;
}
/*
 * Map grant-table frames 0..end_idx.
 *
 * Auto-translated guests populate the frames via XENMEM_add_to_physmap
 * using the pre-reserved xen_auto_xlat_grant_frames pfns, walking
 * backwards so the table grows only once.  Other guests use
 * GNTTABOP_setup_table plus the per-version map_frames hook.
 *
 * Returns 0 on success or a negative error code.
 */
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;
		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/* No need for kzalloc as it is initialized in following hypercall
	 * GNTTABOP_setup_table.
	 */
	frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom = DOMID_SELF;
	setup.nr_frames = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	/* Any other failure (or a bad status) is unrecoverable. */
	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);
	return rc;
}
void fini_gnttab(void) { struct gnttab_setup_table setup; setup.dom = DOMID_SELF; setup.nr_frames = 0; HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1); }
static void unmap_frontend_page(struct xen_blkif *blkif) { struct gnttab_unmap_grant_ref op; gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr, GNTMAP_host_map, blkif->shmem_handle); if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); }
/*
 * Thin wrapper forwarding a batch of grant-map operations to the
 * hypervisor.  Only GNTTABOP_map_grant_ref is permitted; uvaddr is
 * unused here.
 */
/*ARGSUSED*/
int
xen_map_gref(uint_t cmd, gnttab_map_grant_ref_t *mapop, uint_t count,
    boolean_t uvaddr)
{
	long rc;

	ASSERT(cmd == GNTTABOP_map_grant_ref);

	rc = HYPERVISOR_grant_table_op(cmd, mapop, count);

	return ((int)rc);
}
/*
 * Issue a batch of grant-copy operations, resubmitting any entry the
 * hypervisor flags GNTST_eagain.  A failed hypercall is fatal.
 */
void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
{
	unsigned i;

	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count) != 0)
		BUG();

	/* Per-entry retry for transient failures. */
	for (i = 0; i < count; i++) {
		struct gnttab_copy *op = &batch[i];

		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
						&op->status, __func__);
	}
}
/* Disable grant tables */
CAMLprim value caml_gnttab_fini(value unit)
{
	CAMLparam1(unit);

	/* nr_frames == 0 asks Xen to release the grant-table frames. */
	struct gnttab_setup_table teardown = {
		.dom = DOMID_SELF,
		.nr_frames = 0,
	};

	HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &teardown, 1);

	CAMLreturn(Val_unit);
}
/*
 * Unmap the frontend's TX and RX shared ring pages from the backend,
 * taking each comms-area lock around its unmap hypercall.  A failed
 * hypercall is fatal.
 */
static void unmap_frontend_pages(netif_t *netif)
{
	struct gnttab_unmap_grant_ref unmap;
	int rc;

	/* Tear down the TX ring mapping under its VM-area lock. */
	gnttab_set_unmap_op(&unmap,
			    (unsigned long)netif->tx_comms_area->addr,
			    GNTMAP_host_map, netif->tx_shmem_handle);
	lock_vm_area(netif->tx_comms_area);
	rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmap, 1);
	unlock_vm_area(netif->tx_comms_area);
	BUG_ON(rc);

	/* Likewise for the RX ring. */
	gnttab_set_unmap_op(&unmap,
			    (unsigned long)netif->rx_comms_area->addr,
			    GNTMAP_host_map, netif->rx_shmem_handle);
	lock_vm_area(netif->rx_comms_area);
	rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmap, 1);
	unlock_vm_area(netif->rx_comms_area);
	BUG_ON(rc);
}
static void unmap_frontend_page(struct vscsibk_info *info) { struct gnttab_unmap_grant_ref op; int err; gnttab_set_unmap_op(&op, (unsigned long)info->ring_area->addr, GNTMAP_host_map, info->shmem_handle); err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1); BUG_ON(err); }
/*
 * Unmap a batch of grant references, then clear the corresponding
 * foreign p2m mappings.  Returns 0 on success or the first error.
 */
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_map_grant_ref *kmap_ops,
		      struct page **pages, unsigned int count)
{
	int rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
					   unmap_ops, count);

	if (rc != 0)
		return rc;

	return clear_foreign_p2m_mapping(unmap_ops, kmap_ops, pages, count);
}
/*
 * Map the frontend's TX and RX shared ring pages into the backend.
 *
 * Grants tx_ring_ref/rx_ring_ref (from netif->domid) are mapped over
 * the pre-allocated comms areas; refs and handles are recorded on
 * success.  Returns 0, or the failing grant status code — in which
 * case an already-mapped TX ring is left mapped.
 */
static int map_frontend_pages(
	netif_t *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
{
	struct gnttab_map_grant_ref op;
	int ret;

	gnttab_set_map_op(&op, (unsigned long)netif->tx_comms_area->addr,
			  GNTMAP_host_map, tx_ring_ref, netif->domid);

	/* Serialise against other users of the VM area. */
	lock_vm_area(netif->tx_comms_area);
	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
	unlock_vm_area(netif->tx_comms_area);
	BUG_ON(ret);

	if (op.status) {
		DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
		return op.status;
	}

	netif->tx_shmem_ref = tx_ring_ref;
	netif->tx_shmem_handle = op.handle;

	gnttab_set_map_op(&op, (unsigned long)netif->rx_comms_area->addr,
			  GNTMAP_host_map, rx_ring_ref, netif->domid);

	lock_vm_area(netif->rx_comms_area);
	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
	unlock_vm_area(netif->rx_comms_area);
	BUG_ON(ret);

	if (op.status) {
		DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
		return op.status;
	}

	netif->rx_shmem_ref = rx_ring_ref;
	netif->rx_shmem_handle = op.handle;

	return 0;
}
static unsigned int __max_nr_grant_frames(void) { struct gnttab_query_size query; int rc; query.dom = DOMID_SELF; rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1); if ((rc < 0) || (query.status != GNTST_okay)) return 4; /* Legacy max supported number of frames */ return query.max_nr_frames; }
/*
 * Unmap the frontend's shared ring page from the backend, holding the
 * ring VM-area lock around the unmap hypercall.  A failed hypercall is
 * fatal.
 */
static void unmap_frontend_page(blkif_t *blkif)
{
	struct gnttab_unmap_grant_ref unmap;
	int rc;

	gnttab_set_unmap_op(&unmap,
			    (unsigned long)blkif->blk_ring_area->addr,
			    GNTMAP_host_map, blkif->shmem_handle);

	lock_vm_area(blkif->blk_ring_area);
	rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmap, 1);
	unlock_vm_area(blkif->blk_ring_area);

	BUG_ON(rc);
}
/*
 * Destroy a tube and release its resources.
 *
 * The accepting side only unbinds its TX event channel (grant
 * revocation remains commented out below); the attaching side unbinds
 * RX and unmaps the buffer grants and the control page it mapped in
 * tube_attach().  Unmap failures are handled with assert() only.
 */
void tube_destroy(tube_t *tb)
{
    if (tb->accepting) {
        event_unbind(tb->evtchn_tx);
//        for (int i = 0; i < TUBE_SLOTS; i++)
//            grants_end_access(tb->page->tx.slots[i].gref);
//        for (int i = 0; i < TUBE_SLOTS; i++)
//            grants_end_access(tb->page->rx.slots[i].gref);
//        grants_end_access(tb->page_ref);
    } else {
        event_unbind(tb->evtchn_rx);
        /* Unmap all 2*TUBE_SLOTS data buffers, then the control page. */
        int rs = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
                                           tb->bufs_map, 2*TUBE_SLOTS);
        assert(rs == 0);
        rs = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
                                       &tb->page_map, 1);
        assert(rs == 0);
    }
    nfree(tb->node);
}
/* * unmap one page of ring */ static void unmap_frontend_pages(struct xen_chrif *chrif) { struct gnttab_unmap_grant_ref unop; gnttab_set_unmap_op(&unop, (unsigned long)chrif->comms_area->addr, GNTMAP_host_map, chrif->shmem_handle); if(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unop, 1)) printk(KERN_DEBUG "\nxen:dom0: unmapped shared page failed"); free_vm_area(chrif->comms_area); printk(KERN_DEBUG "\nxen:dom0: unmapped frontend pages finished"); }
/*
 * Map a batch of grant references and install m2p overrides.
 *
 * GNTST_eagain entries are retried first.  For non-auto-translated
 * guests each successful map then gets an m2p override so mfn->pfn
 * lookups resolve to the local page; the override loop runs in lazy
 * MMU mode when safe, to batch the PTE updates.
 *
 * Returns 0 on success, or the hypercall/override error.
 */
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;
	bool lazy = false;
	pte_t *pte;
	unsigned long mfn;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	/* Retry eagain maps */
	for (i = 0; i < count; i++)
		if (map_ops[i].status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
						map_ops + i,
						&map_ops[i].status, __func__);

	/* Auto-translated guests need no m2p bookkeeping. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return ret;

	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
		arch_enter_lazy_mmu_mode();
		lazy = true;
	}

	for (i = 0; i < count; i++) {
		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			/* host_addr addresses the PTE itself; read the
			 * mfn out of that PTE. */
			pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
		}
		ret = m2p_add_override(mfn, pages[i],
				       kmap_ops ? &kmap_ops[i] : NULL);
		if (ret)
			goto out;
	}

 out:
	/* Always leave lazy MMU mode, even on error. */
	if (lazy)
		arch_leave_lazy_mmu_mode();

	return ret;
}