int xc_watchdog(xc_interface *xch, uint32_t id, uint32_t timeout)
{
    int ret = -1;
    DECLARE_HYPERCALL;
    DECLARE_HYPERCALL_BUFFER(sched_watchdog_t, arg);

    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
    if ( arg == NULL )
    {
        PERROR("Could not allocate memory for xc_watchdog hypercall");
        goto out1;
    }

    hypercall.op     = __HYPERVISOR_sched_op;
    hypercall.arg[0] = (unsigned long)SCHEDOP_watchdog;
    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
    arg->id = id;
    arg->timeout = timeout;

    ret = do_xen_hypercall(xch, &hypercall);

    xc_hypercall_buffer_free(xch, arg);

 out1:
    return ret;
}
int xc_kexec_get_range(xc_interface *xch, int range, int nr,
                       uint64_t *size, uint64_t *start)
{
    DECLARE_HYPERCALL_BUFFER(xen_kexec_range_t, get_range);
    int ret = -1;

    get_range = xc_hypercall_buffer_alloc(xch, get_range, sizeof(*get_range));
    if ( get_range == NULL )
    {
        PERROR("Could not alloc bounce buffer for kexec_get_range hypercall");
        goto out;
    }

    get_range->range = range;
    get_range->nr = nr;

    ret = xencall2(xch->xcall, __HYPERVISOR_kexec_op,
                   KEXEC_CMD_kexec_get_range,
                   HYPERCALL_BUFFER_AS_ARG(get_range));

    *size = get_range->size;
    *start = get_range->start;

out:
    xc_hypercall_buffer_free(xch, get_range);

    return ret;
}
int xc_domain_shutdown(xc_interface *xch, uint32_t domid, int reason)
{
    int ret = -1;
    DECLARE_HYPERCALL;
    DECLARE_HYPERCALL_BUFFER(sched_remote_shutdown_t, arg);

    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
    if ( arg == NULL )
    {
        PERROR("Could not allocate memory for xc_domain_shutdown hypercall");
        goto out1;
    }

    hypercall.op     = __HYPERVISOR_sched_op;
    hypercall.arg[0] = (unsigned long)SCHEDOP_remote_shutdown;
    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
    arg->domain_id = domid;
    arg->reason = reason;

    ret = do_xen_hypercall(xch, &hypercall);

    xc_hypercall_buffer_free(xch, arg);

 out1:
    return ret;
}
int xc_tbuf_set_cpu_mask(xc_interface *xch, uint32_t mask)
{
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BUFFER(uint8_t, bytemap);
    int ret = -1;
    uint64_t mask64 = mask;

    bytemap = xc_hypercall_buffer_alloc(xch, bytemap, sizeof(mask64));
    if ( bytemap == NULL )
    {
        PERROR("Could not allocate memory for xc_tbuf_set_cpu_mask hypercall");
        goto out;
    }

    sysctl.cmd = XEN_SYSCTL_tbuf_op;
    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_set_cpu_mask;

    bitmap_64_to_byte(bytemap, &mask64, sizeof(mask64) * 8);

    set_xen_guest_handle(sysctl.u.tbuf_op.cpu_mask.bitmap, bytemap);
    /* bytemap is a pointer, so size the bitmap by the mask it carries,
     * not by sizeof(bytemap). */
    sysctl.u.tbuf_op.cpu_mask.nr_cpus = sizeof(mask64) * 8;

    ret = do_sysctl(xch, &sysctl);

    xc_hypercall_buffer_free(xch, bytemap);

out:
    return ret;
}
int xc_hvm_set_isa_irq_level(
    xc_interface *xch, domid_t dom,
    uint8_t isa_irq,
    unsigned int level)
{
    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_isa_irq_level, arg);
    int rc;

    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
    if ( arg == NULL )
    {
        PERROR("Could not allocate memory for xc_hvm_set_isa_irq_level hypercall");
        return -1;
    }

    arg->domid   = dom;
    arg->isa_irq = isa_irq;
    arg->level   = level;

    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
                  HVMOP_set_isa_irq_level,
                  HYPERCALL_BUFFER_AS_ARG(arg));

    xc_hypercall_buffer_free(xch, arg);

    return rc;
}
int xc_kexec_exec(xc_interface *xch, int type)
{
    DECLARE_HYPERCALL;
    DECLARE_HYPERCALL_BUFFER(xen_kexec_exec_t, exec);
    int ret = -1;

    exec = xc_hypercall_buffer_alloc(xch, exec, sizeof(*exec));
    if ( exec == NULL )
    {
        PERROR("Could not alloc bounce buffer for kexec_exec hypercall");
        goto out;
    }

    exec->type = type;

    hypercall.op = __HYPERVISOR_kexec_op;
    hypercall.arg[0] = KEXEC_CMD_kexec;
    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(exec);

    ret = do_xen_hypercall(xch, &hypercall);

out:
    xc_hypercall_buffer_free(xch, exec);

    return ret;
}
int xc_kexec_unload(xc_interface *xch, int type)
{
    DECLARE_HYPERCALL;
    DECLARE_HYPERCALL_BUFFER(xen_kexec_unload_t, unload);
    int ret = -1;

    unload = xc_hypercall_buffer_alloc(xch, unload, sizeof(*unload));
    if ( unload == NULL )
    {
        PERROR("Could not alloc buffer for kexec unload hypercall");
        goto out;
    }

    unload->type = type;

    hypercall.op = __HYPERVISOR_kexec_op;
    hypercall.arg[0] = KEXEC_CMD_kexec_unload;
    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(unload);

    ret = do_xen_hypercall(xch, &hypercall);

out:
    xc_hypercall_buffer_free(xch, unload);

    return ret;
}
static xen_pfn_t xc_dom_gnttab_setup(xc_interface *xch, domid_t domid)
{
    gnttab_setup_table_t setup;
    DECLARE_HYPERCALL_BUFFER(xen_pfn_t, gmfnp);
    int rc;
    xen_pfn_t gmfn;

    gmfnp = xc_hypercall_buffer_alloc(xch, gmfnp, sizeof(*gmfnp));
    if ( gmfnp == NULL )
        return -1;

    setup.dom = domid;
    setup.nr_frames = 1;
    set_xen_guest_handle(setup.frame_list, gmfnp);
    setup.status = 0;

    rc = xc_gnttab_op(xch, GNTTABOP_setup_table, &setup, sizeof(setup), 1);
    gmfn = *gmfnp;
    xc_hypercall_buffer_free(xch, gmfnp);

    if ( rc != 0 || setup.status != GNTST_okay )
    {
        xc_dom_panic(xch, XC_INTERNAL_ERROR,
                     "%s: failed to setup domU grant table "
                     "[errno=%d, status=%" PRId16 "]\n",
                     __FUNCTION__, rc != 0 ? errno : 0, setup.status);
        return -1;
    }

    return gmfn;
}
int xc_domain_get_tsc_info(xc_interface *xch,
                           uint32_t domid,
                           uint32_t *tsc_mode,
                           uint64_t *elapsed_nsec,
                           uint32_t *gtsc_khz,
                           uint32_t *incarnation)
{
    int rc;
    DECLARE_DOMCTL;
    DECLARE_HYPERCALL_BUFFER(xen_guest_tsc_info_t, info);

    info = xc_hypercall_buffer_alloc(xch, info, sizeof(*info));
    if ( info == NULL )
        return -ENOMEM;

    domctl.cmd = XEN_DOMCTL_gettscinfo;
    domctl.domain = (domid_t)domid;
    set_xen_guest_handle(domctl.u.tsc_info.out_info, info);

    rc = do_domctl(xch, &domctl);
    if ( rc == 0 )
    {
        *tsc_mode = info->tsc_mode;
        *elapsed_nsec = info->elapsed_nsec;
        *gtsc_khz = info->gtsc_khz;
        *incarnation = info->incarnation;
    }

    xc_hypercall_buffer_free(xch, info);

    return rc;
}
void cpu_topology_func(int argc, char *argv[])
{
    DECLARE_HYPERCALL_BUFFER(uint32_t, cpu_to_core);
    DECLARE_HYPERCALL_BUFFER(uint32_t, cpu_to_socket);
    DECLARE_HYPERCALL_BUFFER(uint32_t, cpu_to_node);
    xc_topologyinfo_t info = { 0 };
    int i;

    cpu_to_core = xc_hypercall_buffer_alloc(xc_handle, cpu_to_core,
                                            sizeof(*cpu_to_core) * MAX_NR_CPU);
    cpu_to_socket = xc_hypercall_buffer_alloc(xc_handle, cpu_to_socket,
                                              sizeof(*cpu_to_socket) * MAX_NR_CPU);
    cpu_to_node = xc_hypercall_buffer_alloc(xc_handle, cpu_to_node,
                                            sizeof(*cpu_to_node) * MAX_NR_CPU);

    if ( cpu_to_core == NULL || cpu_to_socket == NULL || cpu_to_node == NULL )
    {
        fprintf(stderr, "failed to allocate hypercall buffers\n");
        goto out;
    }

    set_xen_guest_handle(info.cpu_to_core, cpu_to_core);
    set_xen_guest_handle(info.cpu_to_socket, cpu_to_socket);
    set_xen_guest_handle(info.cpu_to_node, cpu_to_node);
    info.max_cpu_index = MAX_NR_CPU - 1;

    if ( xc_topologyinfo(xc_handle, &info) )
    {
        printf("Can not get Xen CPU topology: %d\n", errno);
        goto out;
    }

    if ( info.max_cpu_index > (MAX_NR_CPU - 1) )
        info.max_cpu_index = MAX_NR_CPU - 1;

    printf("CPU\tcore\tsocket\tnode\n");
    for ( i = 0; i <= info.max_cpu_index; i++ )
    {
        if ( cpu_to_core[i] == INVALID_TOPOLOGY_ID )
            continue;
        printf("CPU%d\t %d\t %d\t %d\n",
               i, cpu_to_core[i], cpu_to_socket[i], cpu_to_node[i]);
    }

out:
    xc_hypercall_buffer_free(xc_handle, cpu_to_core);
    xc_hypercall_buffer_free(xc_handle, cpu_to_socket);
    xc_hypercall_buffer_free(xc_handle, cpu_to_node);
}
xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch, uint32_t poolid)
{
    int err = 0;
    xc_cpupoolinfo_t *info = NULL;
    int local_size;
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BUFFER(uint8_t, local);

    local_size = xc_get_cpumap_size(xch);
    if ( local_size <= 0 )
    {
        PERROR("Could not get number of cpus");
        return NULL;
    }

    local = xc_hypercall_buffer_alloc(xch, local, local_size);
    if ( local == NULL )
    {
        PERROR("Could not allocate locked memory for xc_cpupool_getinfo");
        return NULL;
    }

    sysctl.cmd = XEN_SYSCTL_cpupool_op;
    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
    sysctl.u.cpupool_op.cpupool_id = poolid;
    set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
    sysctl.u.cpupool_op.cpumap.nr_bits = local_size * 8;

    err = do_sysctl_save(xch, &sysctl);

    if ( err < 0 )
        goto out;

    info = calloc(1, sizeof(xc_cpupoolinfo_t));
    if ( !info )
        goto out;

    info->cpumap = xc_cpumap_alloc(xch);
    if ( !info->cpumap )
    {
        free(info);
        info = NULL;
        goto out;
    }
    info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
    info->sched_id = sysctl.u.cpupool_op.sched_id;
    info->n_dom = sysctl.u.cpupool_op.n_dom;
    memcpy(info->cpumap, local, local_size);

out:
    xc_hypercall_buffer_free(xch, local);

    return info;
}
int xc_livepatch_upload(xc_interface *xch,
                        char *name,
                        unsigned char *payload,
                        uint32_t size)
{
    int rc;
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BUFFER(char, local);
    DECLARE_HYPERCALL_BOUNCE(name, 0 /* later */, XC_HYPERCALL_BUFFER_BOUNCE_IN);
    xen_livepatch_name_t def_name = { .pad = { 0, 0, 0 } };

    if ( !name || !payload )
    {
        errno = EINVAL;
        return -1;
    }

    def_name.size = strlen(name) + 1;
    if ( def_name.size > XEN_LIVEPATCH_NAME_SIZE )
    {
        errno = EINVAL;
        return -1;
    }

    HYPERCALL_BOUNCE_SET_SIZE(name, def_name.size);

    if ( xc_hypercall_bounce_pre(xch, name) )
        return -1;

    local = xc_hypercall_buffer_alloc(xch, local, size);
    if ( !local )
    {
        xc_hypercall_bounce_post(xch, name);
        return -1;
    }
    memcpy(local, payload, size);

    sysctl.cmd = XEN_SYSCTL_livepatch_op;
    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_UPLOAD;
    sysctl.u.livepatch.pad = 0;
    sysctl.u.livepatch.u.upload.size = size;
    set_xen_guest_handle(sysctl.u.livepatch.u.upload.payload, local);

    sysctl.u.livepatch.u.upload.name = def_name;
    set_xen_guest_handle(sysctl.u.livepatch.u.upload.name.name, name);

    rc = do_sysctl(xch, &sysctl);

    xc_hypercall_buffer_free(xch, local);
    xc_hypercall_bounce_post(xch, name);

    return rc;
}
int xc_vcpu_setaffinity(xc_interface *xch,
                        uint32_t domid,
                        int vcpu,
                        xc_cpumap_t cpumap)
{
    DECLARE_DOMCTL;
    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
    int ret = -1;
    int cpusize;

    cpusize = xc_get_cpumap_size(xch);
    if ( !cpusize )
    {
        PERROR("Could not get number of cpus");
        goto out;
    }

    local = xc_hypercall_buffer_alloc(xch, local, cpusize);
    if ( local == NULL )
    {
        PERROR("Could not allocate memory for setvcpuaffinity domctl hypercall");
        goto out;
    }

    domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
    domctl.domain = (domid_t)domid;
    domctl.u.vcpuaffinity.vcpu = vcpu;

    memcpy(local, cpumap, cpusize);

    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);

    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;

    ret = do_domctl(xch, &domctl);

    xc_hypercall_buffer_free(xch, local);

 out:
    return ret;
}
int xc_kexec_load(xc_interface *xch, uint8_t type, uint16_t arch,
                  uint64_t entry_maddr,
                  uint32_t nr_segments, xen_kexec_segment_t *segments)
{
    int ret = -1;
    DECLARE_HYPERCALL;
    DECLARE_HYPERCALL_BOUNCE(segments, sizeof(*segments) * nr_segments,
                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
    DECLARE_HYPERCALL_BUFFER(xen_kexec_load_t, load);

    if ( xc_hypercall_bounce_pre(xch, segments) )
    {
        PERROR("Could not allocate bounce buffer for kexec load hypercall");
        goto out;
    }
    load = xc_hypercall_buffer_alloc(xch, load, sizeof(*load));
    if ( load == NULL )
    {
        PERROR("Could not allocate buffer for kexec load hypercall");
        goto out;
    }

    load->type = type;
    load->arch = arch;
    load->entry_maddr = entry_maddr;
    load->nr_segments = nr_segments;
    set_xen_guest_handle(load->segments.h, segments);

    hypercall.op = __HYPERVISOR_kexec_op;
    hypercall.arg[0] = KEXEC_CMD_kexec_load;
    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(load);

    ret = do_xen_hypercall(xch, &hypercall);

out:
    xc_hypercall_buffer_free(xch, load);
    xc_hypercall_bounce_post(xch, segments);

    return ret;
}
static void gcov_read(const char *fn)
{
    struct xen_sysctl sys;
    uint32_t total_len;
    DECLARE_HYPERCALL_BUFFER(uint8_t, p);
    FILE *f;

    if (gcov_sysctl(XEN_SYSCTL_GCOV_get_size, &sys, NULL, 0) < 0)
        err(1, "getting total length");

    total_len = sys.u.gcov_op.size;

    /* Shouldn't exceed a few hundred kilobytes */
    if (total_len > 8u * 1024u * 1024u)
        errx(1, "gcov data too big %u bytes\n", total_len);

    p = xc_hypercall_buffer_alloc(xch, p, total_len);
    if (!p)
        err(1, "allocating buffer");

    memset(p, 0, total_len);
    if (gcov_sysctl(XEN_SYSCTL_GCOV_read, &sys, HYPERCALL_BUFFER(p),
                    total_len) < 0)
        err(1, "getting gcov data");

    if (!strcmp(fn, "-"))
        f = stdout;
    else
        f = fopen(fn, "w");

    if (!f)
        err(1, "opening output file");

    if (fwrite(p, 1, total_len, f) != total_len)
        err(1, "writing gcov data to file");

    if (f != stdout)
        fclose(f);

    xc_hypercall_buffer_free(xch, p);
}
xc_cpumap_t xc_cpupool_freeinfo(xc_interface *xch)
{
    int err = -1;
    xc_cpumap_t cpumap = NULL;
    int mapsize;
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BUFFER(uint8_t, local);

    mapsize = xc_get_cpumap_size(xch);
    if ( mapsize <= 0 )
        return NULL;

    local = xc_hypercall_buffer_alloc(xch, local, mapsize);
    if ( local == NULL )
    {
        PERROR("Could not allocate locked memory for xc_cpupool_freeinfo");
        return NULL;
    }

    sysctl.cmd = XEN_SYSCTL_cpupool_op;
    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
    set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
    sysctl.u.cpupool_op.cpumap.nr_bits = mapsize * 8;

    err = do_sysctl_save(xch, &sysctl);

    if ( err < 0 )
        goto out;

    cpumap = xc_cpumap_alloc(xch);
    if ( cpumap == NULL )
        goto out;

    memcpy(cpumap, local, mapsize);

out:
    xc_hypercall_buffer_free(xch, local);
    return cpumap;
}
int xc_kexec_unload(xc_interface *xch, int type)
{
    DECLARE_HYPERCALL_BUFFER(xen_kexec_unload_t, unload);
    int ret = -1;

    unload = xc_hypercall_buffer_alloc(xch, unload, sizeof(*unload));
    if ( unload == NULL )
    {
        PERROR("Could not alloc buffer for kexec unload hypercall");
        goto out;
    }

    unload->type = type;

    ret = xencall2(xch->xcall, __HYPERVISOR_kexec_op,
                   KEXEC_CMD_kexec_unload,
                   HYPERCALL_BUFFER_AS_ARG(unload));

out:
    xc_hypercall_buffer_free(xch, unload);

    return ret;
}
int xc_kexec_exec(xc_interface *xch, int type)
{
    DECLARE_HYPERCALL_BUFFER(xen_kexec_exec_t, exec);
    int ret = -1;

    exec = xc_hypercall_buffer_alloc(xch, exec, sizeof(*exec));
    if ( exec == NULL )
    {
        PERROR("Could not alloc bounce buffer for kexec_exec hypercall");
        goto out;
    }

    exec->type = type;

    ret = xencall2(xch->xcall, __HYPERVISOR_kexec_op,
                   KEXEC_CMD_kexec,
                   HYPERCALL_BUFFER_AS_ARG(exec));

out:
    xc_hypercall_buffer_free(xch, exec);

    return ret;
}
int xc_dom_boot_image(struct xc_dom_image *dom)
{
    DECLARE_HYPERCALL_BUFFER(vcpu_guest_context_any_t, ctxt);
    xc_dominfo_t info;
    int rc;

    DOMPRINTF_CALLED(dom->xch);

    /* misc stuff*/
    if ( (rc = arch_setup_bootearly(dom)) != 0 )
        return rc;

    /* collect some info */
    rc = xc_domain_getinfo(dom->xch, dom->guest_domid, 1, &info);
    if ( rc < 0 )
    {
        xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
                     "%s: getdomaininfo failed (rc=%d)", __FUNCTION__, rc);
        return rc;
    }
    if ( rc == 0 || info.domid != dom->guest_domid )
    {
        xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
                     "%s: Huh? No domains found (nr_domains=%d) "
                     "or domid mismatch (%d != %d)", __FUNCTION__,
                     rc, info.domid, dom->guest_domid);
        return -1;
    }
    dom->shared_info_mfn = info.shared_info_frame;

    /* sanity checks */
    if ( !xc_dom_compat_check(dom) )
        return -1;

    /* initial mm setup */
    if ( (rc = xc_dom_update_guest_p2m(dom)) != 0 )
        return rc;
    if ( dom->arch_hooks->setup_pgtables )
        if ( (rc = dom->arch_hooks->setup_pgtables(dom)) != 0 )
            return rc;

    if ( (rc = clear_page(dom, dom->console_pfn)) != 0 )
        return rc;
    if ( (rc = clear_page(dom, dom->xenstore_pfn)) != 0 )
        return rc;

    /* start info page */
    if ( dom->arch_hooks->start_info )
        dom->arch_hooks->start_info(dom);

    /* hypercall page */
    if ( (rc = setup_hypercall_page(dom)) != 0 )
        return rc;
    xc_dom_log_memory_footprint(dom);

    /* misc x86 stuff */
    if ( (rc = arch_setup_bootlate(dom)) != 0 )
        return rc;

    /* let the vm run */
    /* Allocate the vcpu context buffer only once the earlier steps have
     * succeeded, so the error returns above cannot leak it. */
    ctxt = xc_hypercall_buffer_alloc(dom->xch, ctxt, sizeof(*ctxt));
    if ( ctxt == NULL )
        return -1;

    memset(ctxt, 0, sizeof(*ctxt));
    if ( (rc = dom->arch_hooks->vcpu(dom, ctxt)) != 0 )
    {
        xc_hypercall_buffer_free(dom->xch, ctxt);
        return rc;
    }
    xc_dom_unmap_all(dom);
    rc = launch_vm(dom->xch, dom->guest_domid, ctxt);

    xc_hypercall_buffer_free(dom->xch, ctxt);
    return rc;
}
static void *_gnttab_map_table(xc_interface *xch, int domid, int *gnt_num)
{
    int rc, i;
    struct gnttab_query_size query;
    struct gnttab_setup_table setup;
    DECLARE_HYPERCALL_BUFFER(unsigned long, frame_list);
    xen_pfn_t *pfn_list = NULL;
    grant_entry_v1_t *gnt = NULL;

    if ( !gnt_num )
        return NULL;

    query.dom = domid;
    rc = xc_gnttab_op(xch, GNTTABOP_query_size, &query, sizeof(query), 1);

    if ( rc || (query.status != GNTST_okay) )
    {
        ERROR("Could not query dom%d's grant size\n", domid);
        return NULL;
    }

    *gnt_num = query.nr_frames * (PAGE_SIZE / sizeof(grant_entry_v1_t) );

    frame_list = xc_hypercall_buffer_alloc(xch, frame_list,
                                           query.nr_frames * sizeof(unsigned long));
    if ( !frame_list )
    {
        ERROR("Could not allocate frame_list in xc_gnttab_map_table\n");
        return NULL;
    }

    pfn_list = malloc(query.nr_frames * sizeof(xen_pfn_t));
    if ( !pfn_list )
    {
        ERROR("Could not allocate pfn_list in xc_gnttab_map_table\n");
        goto err;
    }

    setup.dom = domid;
    setup.nr_frames = query.nr_frames;
    set_xen_guest_handle(setup.frame_list, frame_list);

    /* XXX Any race with other setup_table hypercall? */
    rc = xc_gnttab_op(xch, GNTTABOP_setup_table, &setup, sizeof(setup), 1);

    if ( rc || (setup.status != GNTST_okay) )
    {
        ERROR("Could not get grant table frame list\n");
        goto err;
    }

    for ( i = 0; i < setup.nr_frames; i++ )
        pfn_list[i] = frame_list[i];

    gnt = xc_map_foreign_pages(xch, domid, PROT_READ, pfn_list,
                               setup.nr_frames);
    if ( !gnt )
    {
        ERROR("Could not map grant table\n");
        goto err;
    }

err:
    if ( frame_list )
        xc_hypercall_buffer_free(xch, frame_list);
    if ( pfn_list )
        free(pfn_list);

    return gnt;
}
int main(int argc, char *argv[])
{
    xc_interface *xc_handle;
    uint32_t i, j, n;
    uint64_t time;
    double l, b, sl, sb;
    char name[100];
    DECLARE_HYPERCALL_BUFFER(xc_lockprof_data_t, data);

    if ( (argc > 2) || ((argc == 2) && (strcmp(argv[1], "-r") != 0)) )
    {
        printf("%s: [-r]\n", argv[0]);
        printf("no args: print lock profile data\n");
        printf(" -r : reset profile data\n");
        return 1;
    }

    if ( (xc_handle = xc_interface_open(0,0,0)) == 0 )
    {
        fprintf(stderr, "Error opening xc interface: %d (%s)\n",
                errno, strerror(errno));
        return 1;
    }

    if ( argc > 1 )
    {
        if ( xc_lockprof_reset(xc_handle) != 0 )
        {
            fprintf(stderr, "Error resetting profile data: %d (%s)\n",
                    errno, strerror(errno));
            return 1;
        }
        return 0;
    }

    n = 0;
    if ( xc_lockprof_query_number(xc_handle, &n) != 0 )
    {
        fprintf(stderr, "Error getting number of profile records: %d (%s)\n",
                errno, strerror(errno));
        return 1;
    }

    n += 32;    /* just to be sure */
    data = xc_hypercall_buffer_alloc(xc_handle, data, sizeof(*data) * n);
    if ( data == NULL )
    {
        fprintf(stderr, "Could not allocate buffers: %d (%s)\n",
                errno, strerror(errno));
        return 1;
    }

    i = n;
    if ( xc_lockprof_query(xc_handle, &i, &time, HYPERCALL_BUFFER(data)) != 0 )
    {
        fprintf(stderr, "Error getting profile records: %d (%s)\n",
                errno, strerror(errno));
        return 1;
    }

    if ( i > n )
    {
        printf("data incomplete, %d records are missing!\n\n", i - n);
        i = n;
    }

    sl = 0;
    sb = 0;
    for ( j = 0; j < i; j++ )
    {
        switch ( data[j].type )
        {
        case LOCKPROF_TYPE_GLOBAL:
            sprintf(name, "global lock %s", data[j].name);
            break;
        case LOCKPROF_TYPE_PERDOM:
            sprintf(name, "domain %d lock %s", data[j].idx, data[j].name);
            break;
        default:
            sprintf(name, "unknown type(%d) %d lock %s", data[j].type,
                    data[j].idx, data[j].name);
            break;
        }
        l = (double)(data[j].lock_time) / 1E+09;
        b = (double)(data[j].block_time) / 1E+09;
        sl += l;
        sb += b;
        printf("%-50s: lock:%12"PRId64"(%20.9fs), "
               "block:%12"PRId64"(%20.9fs)\n",
               name, data[j].lock_cnt, l, data[j].block_cnt, b);
    }
    l = (double)time / 1E+09;
    printf("total profiling time: %20.9fs\n", l);
    printf("total locked time: %20.9fs\n", sl);
    printf("total blocked time: %20.9fs\n", sb);

    xc_hypercall_buffer_free(xc_handle, data);

    return 0;
}
static void signal_int_handler(int signo)
{
    int i, j, k;
    struct timeval tv;
    int cx_cap = 0, px_cap = 0;
    DECLARE_HYPERCALL_BUFFER(uint32_t, cpu_to_core);
    DECLARE_HYPERCALL_BUFFER(uint32_t, cpu_to_socket);
    DECLARE_HYPERCALL_BUFFER(uint32_t, cpu_to_node);
    xc_topologyinfo_t info = { 0 };

    cpu_to_core = xc_hypercall_buffer_alloc(xc_handle, cpu_to_core,
                                            sizeof(*cpu_to_core) * MAX_NR_CPU);
    cpu_to_socket = xc_hypercall_buffer_alloc(xc_handle, cpu_to_socket,
                                              sizeof(*cpu_to_socket) * MAX_NR_CPU);
    cpu_to_node = xc_hypercall_buffer_alloc(xc_handle, cpu_to_node,
                                            sizeof(*cpu_to_node) * MAX_NR_CPU);

    if ( cpu_to_core == NULL || cpu_to_socket == NULL || cpu_to_node == NULL )
    {
        fprintf(stderr, "failed to allocate hypercall buffers\n");
        goto out;
    }

    if ( gettimeofday(&tv, NULL) == -1 )
    {
        fprintf(stderr, "failed to get timeofday\n");
        goto out;
    }
    usec_end = tv.tv_sec * 1000000UL + tv.tv_usec;

    if ( get_cxstat_by_cpuid(xc_handle, 0, NULL) != -ENODEV )
    {
        cx_cap = 1;
        for ( i = 0; i < max_cpu_nr; i++ )
            if ( !get_cxstat_by_cpuid(xc_handle, i, &cxstat_end[i]) )
                for ( j = 0; j < cxstat_end[i].nr; j++ )
                {
                    int64_t diff = (int64_t)cxstat_end[i].residencies[j] -
                                   (int64_t)cxstat_start[i].residencies[j];
                    if ( diff >= 0 )
                        sum_cx[i] += diff;
                }
    }

    if ( get_pxstat_by_cpuid(xc_handle, 0, NULL) != -ENODEV )
    {
        px_cap = 1;
        for ( i = 0; i < max_cpu_nr; i++ )
            if ( !get_pxstat_by_cpuid(xc_handle, i, &pxstat_end[i]) )
                for ( j = 0; j < pxstat_end[i].total; j++ )
                    sum_px[i] += pxstat_end[i].pt[j].residency -
                                 pxstat_start[i].pt[j].residency;
    }

    for ( i = 0; i < max_cpu_nr; i++ )
        get_avgfreq_by_cpuid(xc_handle, i, &avgfreq[i]);

    printf("Elapsed time (ms): %"PRIu64"\n", (usec_end - usec_start) / 1000UL);

    for ( i = 0; i < max_cpu_nr; i++ )
    {
        uint64_t res, triggers;
        double avg_res;

        printf("\nCPU%d:\tResidency(ms)\t\tAvg Res(ms)\n", i);
        if ( cx_cap && sum_cx[i] > 0 )
        {
            for ( j = 0; j < cxstat_end[i].nr; j++ )
            {
                int64_t diff = (int64_t)cxstat_end[i].residencies[j] -
                               (int64_t)cxstat_start[i].residencies[j];

                res = ( diff >= 0 ) ? diff : 0;
                triggers = cxstat_end[i].triggers[j] -
                           cxstat_start[i].triggers[j];
                /*
                 * triggers may be zero if the CPU has been in this state for
                 * the whole sample or if it never entered the state
                 */
                if ( triggers == 0 && cxstat_end[i].last == j )
                    avg_res = (double)sum_cx[i] / 1000000.0;
                else
                    avg_res = (triggers == 0) ? 0 :
                              (double)res / triggers / 1000000.0;

                printf(" C%d\t%"PRIu64"\t(%5.2f%%)\t%.2f\n", j,
                       res / 1000000UL, 100 * res / (double)sum_cx[i], avg_res);
            }
            printf("\n");
        }
        if ( px_cap && sum_px[i] > 0 )
        {
            for ( j = 0; j < pxstat_end[i].total; j++ )
            {
                res = pxstat_end[i].pt[j].residency -
                      pxstat_start[i].pt[j].residency;
                printf(" P%d\t%"PRIu64"\t(%5.2f%%)\n", j,
                       res / 1000000UL, 100UL * res / (double)sum_px[i]);
            }
        }
        if ( px_cap && avgfreq[i] )
            printf(" Avg freq\t%d\tKHz\n", avgfreq[i]);
    }

    set_xen_guest_handle(info.cpu_to_core, cpu_to_core);
    set_xen_guest_handle(info.cpu_to_socket, cpu_to_socket);
    set_xen_guest_handle(info.cpu_to_node, cpu_to_node);
    info.max_cpu_index = MAX_NR_CPU - 1;

    if ( cx_cap && !xc_topologyinfo(xc_handle, &info) )
    {
        uint32_t socket_ids[MAX_NR_CPU];
        uint32_t core_ids[MAX_NR_CPU];
        uint32_t socket_nr = 0;
        uint32_t core_nr = 0;

        if ( info.max_cpu_index > MAX_NR_CPU - 1 )
            info.max_cpu_index = MAX_NR_CPU - 1;
        /* check validity */
        for ( i = 0; i <= info.max_cpu_index; i++ )
        {
            if ( cpu_to_core[i] == INVALID_TOPOLOGY_ID ||
                 cpu_to_socket[i] == INVALID_TOPOLOGY_ID )
                break;
        }
        if ( i > info.max_cpu_index )
        {
            /* find socket nr & core nr per socket */
            for ( i = 0; i <= info.max_cpu_index; i++ )
            {
                for ( j = 0; j < socket_nr; j++ )
                    if ( cpu_to_socket[i] == socket_ids[j] )
                        break;
                if ( j == socket_nr )
                {
                    socket_ids[j] = cpu_to_socket[i];
                    socket_nr++;
                }

                for ( j = 0; j < core_nr; j++ )
                    if ( cpu_to_core[i] == core_ids[j] )
                        break;
                if ( j == core_nr )
                {
                    core_ids[j] = cpu_to_core[i];
                    core_nr++;
                }
            }

            /* print out CC? and PC? */
            for ( i = 0; i < socket_nr; i++ )
            {
                uint64_t res;

                for ( j = 0; j <= info.max_cpu_index; j++ )
                {
                    if ( cpu_to_socket[j] == socket_ids[i] )
                        break;
                }
                printf("\nSocket %d\n", socket_ids[i]);
                res = cxstat_end[j].pc2 - cxstat_start[j].pc2;
                printf("\tPC2\t%"PRIu64" ms\t%.2f%%\n", res / 1000000UL,
                       100UL * res / (double)sum_cx[j]);
                res = cxstat_end[j].pc3 - cxstat_start[j].pc3;
                printf("\tPC3\t%"PRIu64" ms\t%.2f%%\n", res / 1000000UL,
                       100UL * res / (double)sum_cx[j]);
                res = cxstat_end[j].pc6 - cxstat_start[j].pc6;
                printf("\tPC6\t%"PRIu64" ms\t%.2f%%\n", res / 1000000UL,
                       100UL * res / (double)sum_cx[j]);
                res = cxstat_end[j].pc7 - cxstat_start[j].pc7;
                printf("\tPC7\t%"PRIu64" ms\t%.2f%%\n", res / 1000000UL,
                       100UL * res / (double)sum_cx[j]);
                for ( k = 0; k < core_nr; k++ )
                {
                    for ( j = 0; j <= info.max_cpu_index; j++ )
                    {
                        if ( cpu_to_socket[j] == socket_ids[i] &&
                             cpu_to_core[j] == core_ids[k] )
                            break;
                    }
                    printf("\t Core %d CPU %d\n", core_ids[k], j);
                    res = cxstat_end[j].cc3 - cxstat_start[j].cc3;
                    printf("\t\tCC3\t%"PRIu64" ms\t%.2f%%\n", res / 1000000UL,
                           100UL * res / (double)sum_cx[j]);
                    res = cxstat_end[j].cc6 - cxstat_start[j].cc6;
                    printf("\t\tCC6\t%"PRIu64" ms\t%.2f%%\n", res / 1000000UL,
                           100UL * res / (double)sum_cx[j]);
                    res = cxstat_end[j].cc7 - cxstat_start[j].cc7;
                    printf("\t\tCC7\t%"PRIu64" ms\t%.2f%%\n", res / 1000000UL,
                           100UL * res / (double)sum_cx[j]);
                }
            }
        }
    }

    /* some clean up and then exits */
    for ( i = 0; i < 2 * max_cpu_nr; i++ )
    {
        free(cxstat[i].triggers);
        free(cxstat[i].residencies);
        free(pxstat[i].trans_pt);
        free(pxstat[i].pt);
    }
    free(cxstat);
    free(pxstat);
    free(sum);
    free(avgfreq);
out:
    xc_hypercall_buffer_free(xc_handle, cpu_to_core);
    xc_hypercall_buffer_free(xc_handle, cpu_to_socket);
    xc_hypercall_buffer_free(xc_handle, cpu_to_node);
    xc_interface_close(xc_handle);
    exit(0);
}
static int xc_resource_op_multi(xc_interface *xch, uint32_t nr_ops, xc_resource_op_t *ops)
{
    int rc, i, entries_size;
    xc_resource_op_t *op;
    multicall_entry_t *call;
    DECLARE_HYPERCALL_BUFFER(multicall_entry_t, call_list);
    xc_hypercall_buffer_array_t *platform_ops, *entries_list = NULL;

    call_list = xc_hypercall_buffer_alloc(xch, call_list,
                                          sizeof(*call_list) * nr_ops);
    if ( !call_list )
        return -1;

    platform_ops = xc_hypercall_buffer_array_create(xch, nr_ops);
    if ( !platform_ops )
    {
        rc = -1;
        goto out;
    }

    entries_list = xc_hypercall_buffer_array_create(xch, nr_ops);
    if ( !entries_list )
    {
        rc = -1;
        goto out;
    }

    for ( i = 0; i < nr_ops; i++ )
    {
        DECLARE_HYPERCALL_BUFFER(xen_platform_op_t, platform_op);
        DECLARE_HYPERCALL_BUFFER(xc_resource_entry_t, entries);

        op = ops + i;

        platform_op = xc_hypercall_buffer_array_alloc(xch, platform_ops, i,
                        platform_op, sizeof(xen_platform_op_t));
        if ( !platform_op )
        {
            rc = -1;
            goto out;
        }

        entries_size = sizeof(xc_resource_entry_t) * op->nr_entries;
        entries = xc_hypercall_buffer_array_alloc(xch, entries_list, i,
                   entries, entries_size);
        if ( !entries )
        {
            rc = -1;
            goto out;
        }
        memcpy(entries, op->entries, entries_size);

        call = call_list + i;
        call->op = __HYPERVISOR_platform_op;
        call->args[0] = HYPERCALL_BUFFER_AS_ARG(platform_op);

        platform_op->interface_version = XENPF_INTERFACE_VERSION;
        platform_op->cmd = XENPF_resource_op;
        platform_op->u.resource_op.cpu = op->cpu;
        platform_op->u.resource_op.nr_entries = op->nr_entries;
        set_xen_guest_handle(platform_op->u.resource_op.entries, entries);
    }

    rc = do_multicall_op(xch, HYPERCALL_BUFFER(call_list), nr_ops);

    for ( i = 0; i < nr_ops; i++ )
    {
        DECLARE_HYPERCALL_BUFFER(xc_resource_entry_t, entries);

        op = ops + i;

        call = call_list + i;
        op->result = call->result;

        entries_size = sizeof(xc_resource_entry_t) * op->nr_entries;
        entries = xc_hypercall_buffer_array_get(xch, entries_list, i,
                   entries, entries_size);
        memcpy(op->entries, entries, entries_size);
    }

out:
    xc_hypercall_buffer_array_destroy(xch, entries_list);
    xc_hypercall_buffer_array_destroy(xch, platform_ops);
    xc_hypercall_buffer_free(xch, call_list);
    return rc;
}
int xc_acm_op(xc_interface *xch, int cmd, void *arg, unsigned long arg_size)
{
    int ret;
    DECLARE_HYPERCALL;
    DECLARE_HYPERCALL_BUFFER(struct xen_acmctl, acmctl);

    acmctl = xc_hypercall_buffer_alloc(xch, acmctl, sizeof(*acmctl));
    if ( acmctl == NULL )
    {
        PERROR("Could not allocate memory for ACM OP hypercall");
        return -EFAULT;
    }

    switch (cmd)
    {
        case ACMOP_setpolicy: {
            struct acm_setpolicy *setpolicy = (struct acm_setpolicy *)arg;
            memcpy(&acmctl->u.setpolicy,
                   setpolicy,
                   sizeof(struct acm_setpolicy));
        }
        break;

        case ACMOP_getpolicy: {
            struct acm_getpolicy *getpolicy = (struct acm_getpolicy *)arg;
            memcpy(&acmctl->u.getpolicy,
                   getpolicy,
                   sizeof(struct acm_getpolicy));
        }
        break;

        case ACMOP_dumpstats: {
            struct acm_dumpstats *dumpstats = (struct acm_dumpstats *)arg;
            memcpy(&acmctl->u.dumpstats,
                   dumpstats,
                   sizeof(struct acm_dumpstats));
        }
        break;

        case ACMOP_getssid: {
            struct acm_getssid *getssid = (struct acm_getssid *)arg;
            memcpy(&acmctl->u.getssid,
                   getssid,
                   sizeof(struct acm_getssid));
        }
        break;

        case ACMOP_getdecision: {
            struct acm_getdecision *getdecision = (struct acm_getdecision *)arg;
            memcpy(&acmctl->u.getdecision,
                   getdecision,
                   sizeof(struct acm_getdecision));
        }
        break;

        case ACMOP_chgpolicy: {
            struct acm_change_policy *change_policy = (struct acm_change_policy *)arg;
            memcpy(&acmctl->u.change_policy,
                   change_policy,
                   sizeof(struct acm_change_policy));
        }
        break;

        case ACMOP_relabeldoms: {
            struct acm_relabel_doms *relabel_doms = (struct acm_relabel_doms *)arg;
            memcpy(&acmctl->u.relabel_doms,
                   relabel_doms,
                   sizeof(struct acm_relabel_doms));
        }
        break;
    }

    acmctl->cmd = cmd;
    acmctl->interface_version = ACM_INTERFACE_VERSION;

    hypercall.op = __HYPERVISOR_xsm_op;
    hypercall.arg[0] = HYPERCALL_BUFFER_AS_ARG(acmctl);
    if ( (ret = do_xen_hypercall(xch, &hypercall)) < 0 )
    {
        if ( errno == EACCES )
            DPRINTF("acmctl operation failed -- need to"
                    " rebuild the user-space tool set?\n");
    }

    switch (cmd)
    {
        case ACMOP_getdecision: {
            struct acm_getdecision *getdecision = (struct acm_getdecision *)arg;
            memcpy(getdecision,
                   &acmctl->u.getdecision,
                   sizeof(struct acm_getdecision));
            break;
        }
    }

    xc_hypercall_buffer_free(xch, acmctl);

    return ret;
}