int xc_readconsolering(int xc_handle,
                       char **pbuffer,
                       unsigned int *pnr_chars,
                       int clear, int incremental, uint32_t *pindex)
{
    int ret;
    DECLARE_SYSCTL;
    char *buffer = *pbuffer;
    unsigned int nr_chars = *pnr_chars;

    sysctl.cmd = XEN_SYSCTL_readconsole;
    set_xen_guest_handle(sysctl.u.readconsole.buffer, buffer);
    sysctl.u.readconsole.count = nr_chars;
    sysctl.u.readconsole.clear = clear;
    sysctl.u.readconsole.incremental = 0;
    if ( pindex )
    {
        sysctl.u.readconsole.index = *pindex;
        sysctl.u.readconsole.incremental = incremental;
    }

    if ( (ret = lock_pages(buffer, nr_chars)) != 0 )
        return ret;

    if ( (ret = do_sysctl(xc_handle, &sysctl)) == 0 )
    {
        *pnr_chars = sysctl.u.readconsole.count;
        if ( pindex )
            *pindex = sysctl.u.readconsole.index;
    }

    unlock_pages(buffer, nr_chars);

    return ret;
}
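/*
 * Illustrative sketch, not part of the original source: one way a caller
 * might drain the hypervisor console ring incrementally.  The static
 * 'ring' buffer, 'index' cursor and use of <stdio.h> are assumptions made
 * for the example; on success the call updates 'nr' with the number of new
 * bytes and 'index' with the new ring position.
 */
static void example_drain_console(int xc_handle)
{
    static char ring[16384];
    static uint32_t index = 0;
    char *buf = ring;
    unsigned int nr = sizeof(ring);

    if ( xc_readconsolering(xc_handle, &buf, &nr,
                            0 /* clear */, 1 /* incremental */, &index) == 0 )
        fwrite(buf, 1, nr, stdout);
}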
int xc_tbuf_set_cpu_mask(int xc_handle, uint32_t mask)
{
    DECLARE_SYSCTL;
    int ret = -1;
    uint64_t mask64 = mask;
    uint8_t bytemap[sizeof(mask64)];

    sysctl.cmd = XEN_SYSCTL_tbuf_op;
    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_set_cpu_mask;

    bitmap_64_to_byte(bytemap, &mask64, sizeof(mask64) * 8);

    set_xen_guest_handle(sysctl.u.tbuf_op.cpu_mask.bitmap, bytemap);
    sysctl.u.tbuf_op.cpu_mask.nr_cpus = sizeof(bytemap) * 8;

    if ( lock_pages(&bytemap, sizeof(bytemap)) != 0 )
    {
        PERROR("Could not lock memory for Xen hypercall");
        goto out;
    }

    ret = do_sysctl(xc_handle, &sysctl);

    unlock_pages(&bytemap, sizeof(bytemap));

 out:
    return ret;
}
int xc_vcpu_setcontext(int xc_handle,
                       uint32_t domid,
                       uint32_t vcpu,
                       vcpu_guest_context_any_t *ctxt)
{
    DECLARE_DOMCTL;
    int rc;
    size_t sz = sizeof(vcpu_guest_context_any_t);

    if ( ctxt == NULL )
    {
        errno = EINVAL;
        return -1;
    }

    domctl.cmd = XEN_DOMCTL_setvcpucontext;
    domctl.domain = domid;
    domctl.u.vcpucontext.vcpu = vcpu;
    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt->c);

    if ( (rc = lock_pages(ctxt, sz)) != 0 )
        return rc;
    rc = do_domctl(xc_handle, &domctl);

    unlock_pages(ctxt, sz);

    return rc;
}
int xc_hvm_set_pci_intx_level(
    int xc_handle, domid_t dom,
    uint8_t domain, uint8_t bus, uint8_t device, uint8_t intx,
    unsigned int level)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_set_pci_intx_level arg;
    int rc;

    hypercall.op     = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_pci_intx_level;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid  = dom;
    arg.domain = domain;
    arg.bus    = bus;
    arg.device = device;
    arg.intx   = intx;
    arg.level  = level;

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}
int xc_vcpu_setaffinity(int xc_handle,
                        uint32_t domid,
                        int vcpu,
                        uint64_t cpumap)
{
    DECLARE_DOMCTL;
    int ret = -1;
    uint8_t local[sizeof(cpumap)];

    domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
    domctl.domain = (domid_t)domid;
    domctl.u.vcpuaffinity.vcpu = vcpu;

    bitmap_64_to_byte(local, &cpumap, sizeof(cpumap) * 8);

    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);

    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;

    if ( lock_pages(local, sizeof(local)) != 0 )
    {
        PERROR("Could not lock memory for Xen hypercall");
        goto out;
    }

    ret = do_domctl(xc_handle, &domctl);

    unlock_pages(local, sizeof(local));

 out:
    return ret;
}
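/*
 * Illustrative sketch, not part of the original source: pin one VCPU of a
 * domain to a single physical CPU using the 64-bit cpumap accepted by this
 * variant of xc_vcpu_setaffinity() (a later variant taking a pointer and a
 * size appears further below).  The helper name is hypothetical.
 */
static int example_pin_vcpu(int xc_handle, uint32_t domid, int vcpu, int cpu)
{
    return xc_vcpu_setaffinity(xc_handle, domid, vcpu, 1ULL << cpu);
}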
int xc_hvm_set_pci_link_route(
    int xc_handle, domid_t dom, uint8_t link, uint8_t isa_irq)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_set_pci_link_route arg;
    int rc;

    hypercall.op     = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_pci_link_route;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid   = dom;
    arg.link    = link;
    arg.isa_irq = isa_irq;

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}
int xc_hvm_set_isa_irq_level(
    int xc_handle, domid_t dom, uint8_t isa_irq, unsigned int level)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_set_isa_irq_level arg;
    int rc;

    hypercall.op     = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_isa_irq_level;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid   = dom;
    arg.isa_irq = isa_irq;
    arg.level   = level;

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}
int xc_domain_getinfolist(int xc_handle,
                          uint32_t first_domain,
                          unsigned int max_domains,
                          xc_domaininfo_t *info)
{
    int ret = 0;
    DECLARE_SYSCTL;

    if ( lock_pages(info, max_domains*sizeof(xc_domaininfo_t)) != 0 )
        return -1;

    sysctl.cmd = XEN_SYSCTL_getdomaininfolist;
    sysctl.u.getdomaininfolist.first_domain = first_domain;
    sysctl.u.getdomaininfolist.max_domains  = max_domains;
    set_xen_guest_handle(sysctl.u.getdomaininfolist.buffer, info);

    if ( xc_sysctl(xc_handle, &sysctl) < 0 )
        ret = -1;
    else
        ret = sysctl.u.getdomaininfolist.num_domains;

    unlock_pages(info, max_domains*sizeof(xc_domaininfo_t));

    return ret;
}
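/*
 * Illustrative sketch, not part of the original source: enumerate all
 * domains in chunks by advancing first_domain past the last entry returned.
 * The chunk size, helper name and use of printf() are assumptions for the
 * example; xc_domaininfo_t's 'domain' field holds the domid.
 */
static void example_list_domains(int xc_handle)
{
    xc_domaininfo_t info[256];
    uint32_t first = 0;
    int n, i;

    while ( (n = xc_domain_getinfolist(xc_handle, first, 256, info)) > 0 )
    {
        for ( i = 0; i < n; i++ )
            printf("domain %u\n", (unsigned int)info[i].domain);
        first = info[n-1].domain + 1;
    }
}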
/* Get just one element of the HVM guest context.
 * size must be >= HVM_SAVE_LENGTH(type) */
int xc_domain_hvm_getcontext_partial(int xc_handle,
                                     uint32_t domid,
                                     uint16_t typecode,
                                     uint16_t instance,
                                     void *ctxt_buf,
                                     uint32_t size)
{
    int ret;
    DECLARE_DOMCTL;

    if ( !ctxt_buf )
        return -EINVAL;

    domctl.cmd = XEN_DOMCTL_gethvmcontext_partial;
    domctl.domain = (domid_t) domid;
    domctl.u.hvmcontext_partial.type = typecode;
    domctl.u.hvmcontext_partial.instance = instance;
    set_xen_guest_handle(domctl.u.hvmcontext_partial.buffer, ctxt_buf);

    if ( (ret = lock_pages(ctxt_buf, size)) != 0 )
        return ret;

    ret = do_domctl(xc_handle, &domctl);

    if ( ctxt_buf )
        unlock_pages(ctxt_buf, size);

    return ret ? -1 : 0;
}
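/*
 * Illustrative sketch, not part of the original source: fetch the saved
 * architectural state of VCPU 0 as a single HVM save record.  The typecode
 * HVM_SAVE_CODE(CPU) and struct hvm_hw_cpu come from the public header
 * xen/hvm/save.h; per the comment above, the buffer must be at least
 * HVM_SAVE_LENGTH(CPU) bytes, which sizeof(*cpu) satisfies.
 */
static int example_get_vcpu0_hvm_cpu_state(int xc_handle, uint32_t domid,
                                           struct hvm_hw_cpu *cpu)
{
    return xc_domain_hvm_getcontext_partial(xc_handle, domid,
                                            HVM_SAVE_CODE(CPU), 0,
                                            cpu, sizeof(*cpu));
}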
int xc_get_pfn_list(int xc_handle,
                    uint32_t domid,
                    uint64_t *pfn_buf,
                    unsigned long max_pfns)
{
    DECLARE_DOMCTL;
    int ret;

    domctl.cmd = XEN_DOMCTL_getmemlist;
    domctl.domain = (domid_t)domid;
    domctl.u.getmemlist.max_pfns = max_pfns;
    set_xen_guest_handle(domctl.u.getmemlist.buffer, pfn_buf);

#ifdef VALGRIND
    memset(pfn_buf, 0, max_pfns * sizeof(*pfn_buf));
#endif

    if ( lock_pages(pfn_buf, max_pfns * sizeof(*pfn_buf)) != 0 )
    {
        PERROR("xc_get_pfn_list: pfn_buf lock failed");
        return -1;
    }

    ret = do_domctl(xc_handle, &domctl);

    unlock_pages(pfn_buf, max_pfns * sizeof(*pfn_buf));

    return (ret < 0) ? -1 : domctl.u.getmemlist.num_pfns;
}
int xc_hvm_track_dirty_vram(
    int xc_handle, domid_t dom,
    uint64_t first_pfn, uint64_t nr,
    unsigned long *dirty_bitmap)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_track_dirty_vram arg;
    int rc;

    hypercall.op     = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_track_dirty_vram;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid     = dom;
    arg.first_pfn = first_pfn;
    arg.nr        = nr;
    set_xen_guest_handle(arg.dirty_bitmap, (uint8_t *)dirty_bitmap);

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}
int xc_mmuext_op(
    int xc_handle,
    struct mmuext_op *op,
    unsigned int nr_ops,
    domid_t dom)
{
    DECLARE_HYPERCALL;
    long ret = -EINVAL;

    hypercall.op     = __HYPERVISOR_mmuext_op;
    hypercall.arg[0] = (unsigned long)op;
    hypercall.arg[1] = (unsigned long)nr_ops;
    hypercall.arg[2] = (unsigned long)0;
    hypercall.arg[3] = (unsigned long)dom;

    if ( lock_pages(op, nr_ops*sizeof(*op)) != 0 )
    {
        PERROR("Could not lock memory for Xen hypercall");
        goto out1;
    }

    ret = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(op, nr_ops*sizeof(*op));

 out1:
    return ret;
}
static int flush_mmu_updates(int xc_handle, struct xc_mmu *mmu)
{
    int err = 0;
    DECLARE_HYPERCALL;

    if ( mmu->idx == 0 )
        return 0;

    hypercall.op     = __HYPERVISOR_mmu_update;
    hypercall.arg[0] = (unsigned long)mmu->updates;
    hypercall.arg[1] = (unsigned long)mmu->idx;
    hypercall.arg[2] = 0;
    hypercall.arg[3] = mmu->subject;

    if ( lock_pages(mmu->updates, sizeof(mmu->updates)) != 0 )
    {
        PERROR("flush_mmu_updates: mmu updates lock_pages failed");
        err = 1;
        goto out;
    }

    if ( do_xen_hypercall(xc_handle, &hypercall) < 0 )
    {
        ERROR("Failure when submitting mmu updates");
        err = 1;
    }

    mmu->idx = 0;

    unlock_pages(mmu->updates, sizeof(mmu->updates));

 out:
    return err;
}
static int
xc_ia64_pv_recv_vcpu_context(int xc_handle, int io_fd, int32_t dom,
                             uint32_t vcpu)
{
    int rc = -1;

    /* A copy of the CPU context of the guest. */
    vcpu_guest_context_any_t ctxt_any;
    vcpu_guest_context_t *ctxt = &ctxt_any.c;

    if (lock_pages(&ctxt_any, sizeof(ctxt_any))) {
        /* needed for build domctl, but might as well do early */
        ERROR("Unable to lock_pages ctxt");
        return -1;
    }

    if (xc_ia64_recv_vcpu_context(xc_handle, io_fd, dom, vcpu, &ctxt_any))
        goto out;

    /* Then get privreg page. */
    if (read_page(xc_handle, io_fd, dom, ctxt->privregs_pfn) < 0) {
        ERROR("Could not read vcpu privregs");
        goto out;
    }

    rc = 0;

 out:
    unlock_pages(&ctxt_any, sizeof(ctxt_any));
    return rc;
}
int xc_hvm_set_mem_type(
    int xc_handle, domid_t dom, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint64_t nr)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_set_mem_type arg;
    int rc;

    hypercall.op     = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_mem_type;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid       = dom;
    arg.hvmmem_type = mem_type;
    arg.first_pfn   = first_pfn;
    arg.nr          = nr;

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}
int xc_domain_get_tsc_info(int xc_handle,
                           uint32_t domid,
                           uint32_t *tsc_mode,
                           uint64_t *elapsed_nsec,
                           uint32_t *gtsc_khz,
                           uint32_t *incarnation)
{
    int rc;
    DECLARE_DOMCTL;
    xen_guest_tsc_info_t info = { 0 };

    domctl.cmd = XEN_DOMCTL_gettscinfo;
    domctl.domain = (domid_t)domid;
    set_xen_guest_handle(domctl.u.tsc_info.out_info, &info);
    if ( (rc = lock_pages(&info, sizeof(info))) != 0 )
        return rc;
    rc = do_domctl(xc_handle, &domctl);
    if ( rc == 0 )
    {
        *tsc_mode = info.tsc_mode;
        *elapsed_nsec = info.elapsed_nsec;
        *gtsc_khz = info.gtsc_khz;
        *incarnation = info.incarnation;
    }
    unlock_pages(&info, sizeof(info));
    return rc;
}
int xc_get_device_group(
    int xc_handle,
    uint32_t domid,
    uint32_t machine_bdf,
    uint32_t max_sdevs,
    uint32_t *num_sdevs,
    uint32_t *sdev_array)
{
    int rc;
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_get_device_group;
    domctl.domain = (domid_t)domid;

    domctl.u.get_device_group.machine_bdf = machine_bdf;
    domctl.u.get_device_group.max_sdevs = max_sdevs;

    set_xen_guest_handle(domctl.u.get_device_group.sdev_array, sdev_array);

    if ( lock_pages(sdev_array, max_sdevs * sizeof(*sdev_array)) != 0 )
    {
        PERROR("Could not lock memory for xc_get_device_group\n");
        return -ENOMEM;
    }
    rc = do_domctl(xc_handle, &domctl);
    unlock_pages(sdev_array, max_sdevs * sizeof(*sdev_array));

    *num_sdevs = domctl.u.get_device_group.num_sdevs;
    return rc;
}
int xc_domain_shutdown(int xc_handle,
                       uint32_t domid,
                       int reason)
{
    int ret = -1;
    sched_remote_shutdown_t arg;
    DECLARE_HYPERCALL;

    hypercall.op     = __HYPERVISOR_sched_op;
    hypercall.arg[0] = (unsigned long)SCHEDOP_remote_shutdown;
    hypercall.arg[1] = (unsigned long)&arg;
    arg.domain_id = domid;
    arg.reason = reason;

    if ( lock_pages(&arg, sizeof(arg)) != 0 )
    {
        PERROR("Could not lock memory for Xen hypercall");
        goto out1;
    }

    ret = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

 out1:
    return ret;
}
int xc_domain_set_memmap_limit(int xc_handle,
                               uint32_t domid,
                               unsigned long map_limitkb)
{
    int rc;

    struct xen_foreign_memory_map fmap = {
        .domid = domid,
        .map = { .nr_entries = 1 }
    };

    struct e820entry e820 = {
        .addr = 0,
        .size = (uint64_t)map_limitkb << 10,
        .type = E820_RAM
    };

    set_xen_guest_handle(fmap.map.buffer, &e820);

    if ( lock_pages(&fmap, sizeof(fmap)) || lock_pages(&e820, sizeof(e820)) )
    {
        PERROR("Could not lock memory for Xen hypercall");
        rc = -1;
        goto out;
    }

    rc = xc_memory_op(xc_handle, XENMEM_set_memory_map, &fmap);

 out:
    unlock_pages(&fmap, sizeof(fmap));
    unlock_pages(&e820, sizeof(e820));

    return rc;
}
#else
int xc_domain_set_memmap_limit(int xc_handle,
                               uint32_t domid,
                               unsigned long map_limitkb)
{
    PERROR("Function not implemented");
    errno = ENOSYS;
    return -1;
}
static void*
xc_ia64_map_foreign_p2m(int xc_handle, uint32_t dom,
                        struct xen_ia64_memmap_info *memmap_info,
                        unsigned long flags, unsigned long *p2m_size_p)
{
    /* Signed so that a negative error return from xc_memory_op()
     * is actually caught by the check below. */
    long gpfn_max;
    unsigned long p2m_size;
    void *addr;
    privcmd_hypercall_t hypercall;
    int ret;
    int saved_errno;

    gpfn_max = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &dom);
    if (gpfn_max < 0)
        return NULL;
    p2m_size =
        (((gpfn_max + 1) + PTRS_PER_PTE - 1) / PTRS_PER_PTE) << PAGE_SHIFT;
    addr = mmap(NULL, p2m_size, PROT_READ, MAP_SHARED, xc_handle, 0);
    if (addr == MAP_FAILED)
        return NULL;

    hypercall.op = __HYPERVISOR_ia64_dom0vp_op;
    hypercall.arg[0] = IA64_DOM0VP_expose_foreign_p2m;
    hypercall.arg[1] = (unsigned long)addr;
    hypercall.arg[2] = dom;
    hypercall.arg[3] = (unsigned long)memmap_info;
    hypercall.arg[4] = flags;

    if (lock_pages(memmap_info,
                   sizeof(*memmap_info) + memmap_info->efi_memmap_size) != 0) {
        saved_errno = errno;
        munmap(addr, p2m_size);
        errno = saved_errno;
        return NULL;
    }
    ret = do_xen_hypercall(xc_handle, &hypercall);
    saved_errno = errno;
    unlock_pages(memmap_info,
                 sizeof(*memmap_info) + memmap_info->efi_memmap_size);
    if (ret < 0) {
        munmap(addr, p2m_size);
        errno = saved_errno;
        return NULL;
    }

    *p2m_size_p = p2m_size;
    return addr;
}
int xc_send_debug_keys(int xc_handle, char *keys)
{
    int ret, len = strlen(keys);
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_debug_keys;
    set_xen_guest_handle(sysctl.u.debug_keys.keys, keys);
    sysctl.u.debug_keys.nr_keys = len;

    if ( (ret = lock_pages(keys, len)) != 0 )
        return ret;

    ret = do_sysctl(xc_handle, &sysctl);

    unlock_pages(keys, len);

    return ret;
}
int xc_get_hvm_param(int handle, domid_t dom, int param, unsigned long *value)
{
    DECLARE_HYPERCALL;
    xen_hvm_param_t arg;
    int rc;

    hypercall.op     = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_get_param;
    hypercall.arg[1] = (unsigned long)&arg;
    arg.domid = dom;
    arg.index = param;
    if ( lock_pages(&arg, sizeof(arg)) != 0 )
        return -1;
    rc = do_xen_hypercall(handle, &hypercall);
    unlock_pages(&arg, sizeof(arg));
    *value = arg.value;
    return rc;
}
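/*
 * Illustrative sketch, not part of the original source: read a single HVM
 * parameter, here the xenstore ring PFN.  HVM_PARAM_STORE_PFN comes from
 * the public header xen/hvm/params.h; the helper name is hypothetical.
 */
static int example_get_store_pfn(int xc_handle, domid_t dom,
                                 unsigned long *pfn)
{
    return xc_get_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, pfn);
}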
/* It is possible to obtain memmap_info and the memmap by mapping foreign
   domain pages, but that is racy. Use a hypercall to avoid the race. */
static int
xc_ia64_get_memmap(int xc_handle, uint32_t domid, char *buf,
                   unsigned long bufsize)
{
    privcmd_hypercall_t hypercall;
    int ret;

    hypercall.op = __HYPERVISOR_ia64_dom0vp_op;
    hypercall.arg[0] = IA64_DOM0VP_get_memmap;
    hypercall.arg[1] = domid;
    hypercall.arg[2] = (unsigned long)buf;
    hypercall.arg[3] = bufsize;
    hypercall.arg[4] = 0;

    if (lock_pages(buf, bufsize) != 0)
        return -1;
    ret = do_xen_hypercall(xc_handle, &hypercall);
    unlock_pages(buf, bufsize);
    return ret;
}
int xc_version(int xc_handle, int cmd, void *arg)
{
    int rc, argsize = 0;

    switch ( cmd )
    {
    case XENVER_extraversion:
        argsize = sizeof(xen_extraversion_t);
        break;
    case XENVER_compile_info:
        argsize = sizeof(xen_compile_info_t);
        break;
    case XENVER_capabilities:
        argsize = sizeof(xen_capabilities_info_t);
        break;
    case XENVER_changeset:
        argsize = sizeof(xen_changeset_info_t);
        break;
    case XENVER_platform_parameters:
        argsize = sizeof(xen_platform_parameters_t);
        break;
    }

    if ( (argsize != 0) && (lock_pages(arg, argsize) != 0) )
    {
        PERROR("Could not lock memory for version hypercall");
        return -ENOMEM;
    }

#ifdef VALGRIND
    if (argsize != 0)
        memset(arg, 0, argsize);
#endif

    rc = do_xen_version(xc_handle, cmd, arg);

    if ( argsize != 0 )
        unlock_pages(arg, argsize);

    return rc;
}
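/*
 * Illustrative sketch, not part of the original source: query the
 * hypervisor's extraversion string (e.g. "-unstable").  xen_extraversion_t
 * is the fixed-size char array from the public header xen/version.h; the
 * helper name and use of printf() are assumptions for the example.
 */
static void example_print_extraversion(int xc_handle)
{
    xen_extraversion_t extra;

    if ( xc_version(xc_handle, XENVER_extraversion, &extra) >= 0 )
        printf("extraversion: %s\n", extra);
}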
/* set info to hvm guest for restore */
int xc_domain_hvm_setcontext(int xc_handle,
                             uint32_t domid,
                             uint8_t *ctxt_buf,
                             uint32_t size)
{
    int ret;
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_sethvmcontext;
    domctl.domain = domid;
    domctl.u.hvmcontext.size = size;
    set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);

    if ( (ret = lock_pages(ctxt_buf, size)) != 0 )
        return ret;

    ret = do_domctl(xc_handle, &domctl);

    unlock_pages(ctxt_buf, size);

    return ret;
}
int xc_vcpu_getcontext(int xc_handle,
                       uint32_t domid,
                       uint32_t vcpu,
                       vcpu_guest_context_any_t *ctxt)
{
    int rc;
    DECLARE_DOMCTL;
    size_t sz = sizeof(vcpu_guest_context_any_t);

    domctl.cmd = XEN_DOMCTL_getvcpucontext;
    domctl.domain = (domid_t)domid;
    domctl.u.vcpucontext.vcpu = (uint16_t)vcpu;
    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt->c);

    if ( (rc = lock_pages(ctxt, sz)) != 0 )
        return rc;
    rc = do_domctl(xc_handle, &domctl);

    unlock_pages(ctxt, sz);

    return rc;
}
int xc_getcpuinfo(int xc_handle, int max_cpus,
                  xc_cpuinfo_t *info, int *nr_cpus)
{
    int rc;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_getcpuinfo;
    sysctl.u.getcpuinfo.max_cpus = max_cpus;
    set_xen_guest_handle(sysctl.u.getcpuinfo.info, info);

    if ( (rc = lock_pages(info, max_cpus*sizeof(*info))) != 0 )
        return rc;

    rc = do_sysctl(xc_handle, &sysctl);

    unlock_pages(info, max_cpus*sizeof(*info));

    if ( nr_cpus )
        *nr_cpus = sysctl.u.getcpuinfo.nr_cpus;

    return rc;
}
int xc_vcpu_setaffinity(int xc_handle,
                        uint32_t domid,
                        int vcpu,
                        uint64_t *cpumap, int cpusize)
{
    DECLARE_DOMCTL;
    int ret = -1;
    uint8_t *local = malloc(cpusize);

    if ( local == NULL )
    {
        PERROR("Could not alloc memory for Xen hypercall");
        goto out;
    }

    domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
    domctl.domain = (domid_t)domid;
    domctl.u.vcpuaffinity.vcpu = vcpu;

    bitmap_64_to_byte(local, cpumap, cpusize * 8);

    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);

    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;

    if ( lock_pages(local, cpusize) != 0 )
    {
        PERROR("Could not lock memory for Xen hypercall");
        goto out;
    }

    ret = do_domctl(xc_handle, &domctl);

    unlock_pages(local, cpusize);

 out:
    free(local);
    return ret;
}
static int
xc_ia64_get_pfn_list(int xc_handle, uint32_t domid, xen_pfn_t *pfn_buf,
                     unsigned int start_page, unsigned int nr_pages)
{
    DECLARE_DOMCTL;
    int ret;

    domctl.cmd = XEN_DOMCTL_getmemlist;
    domctl.domain = (domid_t)domid;
    domctl.u.getmemlist.max_pfns = nr_pages;
    domctl.u.getmemlist.start_pfn = start_page;
    domctl.u.getmemlist.num_pfns = 0;
    set_xen_guest_handle(domctl.u.getmemlist.buffer, pfn_buf);

    if (lock_pages(pfn_buf, nr_pages * sizeof(xen_pfn_t)) != 0) {
        PERROR("Could not lock pfn list buffer");
        return -1;
    }

    ret = do_domctl(xc_handle, &domctl);

    unlock_pages(pfn_buf, nr_pages * sizeof(xen_pfn_t));

    return ret < 0 ? -1 : nr_pages;
}
int main(int argc, char *argv[])
{
    int i, j, xc_handle;
    xc_perfc_desc_t *pcd;
    xc_perfc_val_t *pcv;
    xc_perfc_val_t *val;
    int num_desc, num_val;
    unsigned int sum, reset = 0, full = 0, pretty = 0;
    char hypercall_name[36];

    if ( argc > 1 )
    {
        char *p = argv[1];
        if ( p[0] == '-' )
        {
            switch ( p[1] )
            {
            case 'f':
                full = 1;
                break;
            case 'p':
                full = 1;
                pretty = 1;
                break;
            case 'r':
                reset = 1;
                break;
            default:
                goto error;
            }
        }
        else
        {
        error:
            printf("%s: [-f | -p | -r]\n", argv[0]);
            printf("no args: print digested counters\n");
            printf("    -f : print full arrays/histograms\n");
            printf("    -p : print full arrays/histograms in pretty format\n");
            printf("    -r : reset counters\n");
            return 0;
        }
    }

    if ( (xc_handle = xc_interface_open()) == -1 )
    {
        fprintf(stderr, "Error opening xc interface: %d (%s)\n",
                errno, strerror(errno));
        return 1;
    }

    if ( reset )
    {
        if ( xc_perfc_control(xc_handle, XEN_SYSCTL_PERFCOP_reset,
                              NULL, NULL, NULL, NULL) != 0 )
        {
            fprintf(stderr, "Error resetting performance counters: %d (%s)\n",
                    errno, strerror(errno));
            return 1;
        }

        return 0;
    }

    if ( xc_perfc_control(xc_handle, XEN_SYSCTL_PERFCOP_query,
                          NULL, NULL, &num_desc, &num_val) != 0 )
    {
        fprintf(stderr, "Error getting number of perf counters: %d (%s)\n",
                errno, strerror(errno));
        return 1;
    }

    pcd = malloc(sizeof(*pcd) * num_desc);
    pcv = malloc(sizeof(*pcv) * num_val);

    if ( pcd == NULL
         || lock_pages(pcd, sizeof(*pcd) * num_desc) != 0
         || pcv == NULL
         || lock_pages(pcv, sizeof(*pcv) * num_val) != 0 )
    {
        fprintf(stderr, "Could not alloc or lock buffers: %d (%s)\n",
                errno, strerror(errno));
        exit(-1);
    }

    if ( xc_perfc_control(xc_handle, XEN_SYSCTL_PERFCOP_query,
                          pcd, pcv, NULL, NULL) != 0 )
    {
        fprintf(stderr, "Error getting perf counter: %d (%s)\n",
                errno, strerror(errno));
        return 1;
    }

    unlock_pages(pcd, sizeof(*pcd) * num_desc);
    unlock_pages(pcv, sizeof(*pcv) * num_val);

    val = pcv;
    for ( i = 0; i < num_desc; i++ )
    {
        printf("%-35s ", pcd[i].name);

        sum = 0;
        for ( j = 0; j < pcd[i].nr_vals; j++ )
            sum += val[j];
        printf("T=%10u ", (unsigned int)sum);

        if ( full || (pcd[i].nr_vals <= 4) )
        {
            if ( pretty && (strcmp(pcd[i].name, "hypercalls") == 0) )
            {
                printf("\n");
                for ( j = 0; j < pcd[i].nr_vals; j++ )
                {
                    if ( val[j] == 0 )
                        continue;
                    if ( (j < 64) && hypercall_name_table[j] )
                        strncpy(hypercall_name, hypercall_name_table[j],
                                sizeof(hypercall_name));
                    else
                        snprintf(hypercall_name, sizeof(hypercall_name),
                                 "[%d]", j);
                    hypercall_name[sizeof(hypercall_name)-1] = '\0';
                    printf("%-35s ", hypercall_name);
                    printf("%12u\n", (unsigned int)val[j]);
                }
            }
            else
            {
                for ( j = 0; j < pcd[i].nr_vals; j++ )
                    printf(" %10u", (unsigned int)val[j]);
                printf("\n");
            }
        }
        else
        {
            printf("\n");
        }

        val += pcd[i].nr_vals;
    }

    return 0;
}