static void gcov_read(const char *fn)
{
    struct xen_sysctl sys;
    uint32_t total_len;
    DECLARE_HYPERCALL_BUFFER(uint8_t, p);
    FILE *f;

    if (gcov_sysctl(XEN_SYSCTL_GCOV_get_size, &sys, NULL, 0) < 0)
        err(1, "getting total length");
    total_len = sys.u.gcov_op.size;

    /* Shouldn't exceed a few hundred kilobytes */
    if (total_len > 8u * 1024u * 1024u)
        errx(1, "gcov data too big %u bytes\n", total_len);

    p = xc_hypercall_buffer_alloc(xch, p, total_len);
    if (!p)
        err(1, "allocating buffer");

    memset(p, 0, total_len);
    if (gcov_sysctl(XEN_SYSCTL_GCOV_read, &sys, HYPERCALL_BUFFER(p),
                    total_len) < 0)
        err(1, "getting gcov data");

    if (!strcmp(fn, "-"))
        f = stdout;
    else
        f = fopen(fn, "w");

    if (!f)
        err(1, "opening output file");

    if (fwrite(p, 1, total_len, f) != total_len)
        err(1, "writing gcov data to file");

    if (f != stdout)
        fclose(f);

    xc_hypercall_buffer_free(xch, p);
}
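/*
 * A companion operation to gcov_read(): resetting the in-hypervisor
 * coverage counters.  This is a minimal sketch built on the same
 * gcov_sysctl() helper and XEN_SYSCTL_gcov_op interface used above;
 * it assumes XEN_SYSCTL_GCOV_reset needs no buffer, so NULL and a zero
 * size are passed.
 */
static void gcov_reset(void)
{
    struct xen_sysctl sys;

    /* No data is transferred for a reset, only the command itself. */
    if (gcov_sysctl(XEN_SYSCTL_GCOV_reset, &sys, NULL, 0) < 0)
        err(1, "resetting gcov counters");
}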
int main(int argc, char *argv[])
{
    xc_interface *xc_handle;
    uint32_t i, j, n;
    uint64_t time;
    double l, b, sl, sb;
    char name[100];
    DECLARE_HYPERCALL_BUFFER(xc_lockprof_data_t, data);

    if ( (argc > 2) || ((argc == 2) && (strcmp(argv[1], "-r") != 0)) )
    {
        printf("%s: [-r]\n", argv[0]);
        printf("no args: print lock profile data\n");
        printf("     -r : reset profile data\n");
        return 1;
    }

    if ( (xc_handle = xc_interface_open(0, 0, 0)) == 0 )
    {
        fprintf(stderr, "Error opening xc interface: %d (%s)\n",
                errno, strerror(errno));
        return 1;
    }

    if ( argc > 1 )
    {
        if ( xc_lockprof_reset(xc_handle) != 0 )
        {
            fprintf(stderr, "Error resetting profile data: %d (%s)\n",
                    errno, strerror(errno));
            return 1;
        }
        return 0;
    }

    n = 0;
    if ( xc_lockprof_query_number(xc_handle, &n) != 0 )
    {
        fprintf(stderr, "Error getting number of profile records: %d (%s)\n",
                errno, strerror(errno));
        return 1;
    }

    n += 32;    /* just to be sure */
    data = xc_hypercall_buffer_alloc(xc_handle, data, sizeof(*data) * n);
    if ( data == NULL )
    {
        fprintf(stderr, "Could not allocate buffers: %d (%s)\n",
                errno, strerror(errno));
        return 1;
    }

    i = n;
    if ( xc_lockprof_query(xc_handle, &i, &time, HYPERCALL_BUFFER(data)) != 0 )
    {
        fprintf(stderr, "Error getting profile records: %d (%s)\n",
                errno, strerror(errno));
        return 1;
    }

    if ( i > n )
    {
        printf("data incomplete, %d records are missing!\n\n", i - n);
        i = n;
    }

    sl = 0;
    sb = 0;
    for ( j = 0; j < i; j++ )
    {
        switch ( data[j].type )
        {
        case LOCKPROF_TYPE_GLOBAL:
            sprintf(name, "global lock %s", data[j].name);
            break;
        case LOCKPROF_TYPE_PERDOM:
            sprintf(name, "domain %d lock %s", data[j].idx, data[j].name);
            break;
        default:
            sprintf(name, "unknown type(%d) %d lock %s", data[j].type,
                    data[j].idx, data[j].name);
            break;
        }
        l = (double)(data[j].lock_time) / 1E+09;
        b = (double)(data[j].block_time) / 1E+09;
        sl += l;
        sb += b;
        printf("%-50s: lock:%12"PRId64"(%20.9fs), "
               "block:%12"PRId64"(%20.9fs)\n",
               name, data[j].lock_cnt, l, data[j].block_cnt, b);
    }
    l = (double)time / 1E+09;
    printf("total profiling time: %20.9fs\n", l);
    printf("total locked time:    %20.9fs\n", sl);
    printf("total blocked time:   %20.9fs\n", sb);

    xc_hypercall_buffer_free(xc_handle, data);

    return 0;
}
/*
 * The heart of this function is to get an array of xen_livepatch_status_t.
 *
 * However, it is complex because it has to deal with the hypervisor
 * returning only some of the requested data, or the data being stale
 * (another hypercall might alter the list).
 *
 * The parameters that the function expects to contain data from
 * the hypervisor are: 'info', 'name', and 'len'. The 'done' and
 * 'left' parameters are also updated with the number of entries filled
 * out and the number of entries left to get from the hypervisor,
 * respectively.
 *
 * The caller is expected to advance 'start' by 'done' on each
 * subsequent call, so that 'start' acts as a cursor into the array.
 * Note that 'info', 'name', and 'len' will be updated on subsequent
 * calls.
 *
 * The 'max' value is provided by the caller and is the maximum number
 * of entries that the 'info', 'name', and 'len' arrays can hold.
 *
 * Each entry in the 'name' array is expected to be XEN_LIVEPATCH_NAME_SIZE
 * bytes long.
 *
 * Each entry in the 'info' array is expected to be the size of the
 * xen_livepatch_status_t structure.
 *
 * Each entry in the 'len' array is expected to be of uint32_t size.
 *
 * The return value is zero if the hypercall completed successfully.
 * Note that the return value is _not_ the number of entries filled
 * out - that is saved in 'done'.
 *
 * If there was an error performing the operation, the return value
 * will be a negative -EXX type value. The 'done' and 'left' parameters
 * will contain the number of entries that had been successfully
 * retrieved (if any).
 */
int xc_livepatch_list(xc_interface *xch, unsigned int max, unsigned int start,
                      xen_livepatch_status_t *info,
                      char *name, uint32_t *len,
                      unsigned int *done,
                      unsigned int *left)
{
    int rc;
    DECLARE_SYSCTL;
    /* The sizes are adjusted later - hence zero. */
    DECLARE_HYPERCALL_BOUNCE(info, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(name, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(len, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    uint32_t max_batch_sz, nr;
    uint32_t version = 0, retries = 0;
    uint32_t adjust = 0;
    ssize_t sz;

    if ( !max || !info || !name || !len )
    {
        errno = EINVAL;
        return -1;
    }

    sysctl.cmd = XEN_SYSCTL_livepatch_op;
    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_LIST;
    sysctl.u.livepatch.pad = 0;
    sysctl.u.livepatch.u.list.version = 0;
    sysctl.u.livepatch.u.list.idx = start;
    sysctl.u.livepatch.u.list.pad = 0;

    max_batch_sz = max;
    /* Convenience value. */
    sz = sizeof(*name) * XEN_LIVEPATCH_NAME_SIZE;
    *done = 0;
    *left = 0;
    do {
        /*
         * The first time we go in this loop our 'max' may be bigger
         * than what the hypervisor is comfortable with - hence the first
         * couple of loops may adjust the number of entries we will
         * want filled (tracked by 'nr').
         *
         * N.B. This is a do { } while loop and the right-hand side of
         * the conditional when adjusting will evaluate to false (as
         * *left is set to zero before the loop). Hence we need this
         * 'adjust' - even if we reset it at the start of the loop.
         */
        if ( adjust )
            adjust = 0; /* Used when adjusting 'max_batch_sz' or 'retries'. */

        nr = min(max - *done, max_batch_sz);

        sysctl.u.livepatch.u.list.nr = nr;
        /* Fix the size (may vary between hypercalls). */
        HYPERCALL_BOUNCE_SET_SIZE(info, nr * sizeof(*info));
        HYPERCALL_BOUNCE_SET_SIZE(name, nr * sz);
        HYPERCALL_BOUNCE_SET_SIZE(len, nr * sizeof(*len));

        /* Move the pointer to the proper offset into 'info'. */
        (HYPERCALL_BUFFER(info))->ubuf = info + *done;
        (HYPERCALL_BUFFER(name))->ubuf = name + (sz * *done);
        (HYPERCALL_BUFFER(len))->ubuf = len + *done;

        /* Allocate memory. */
        rc = xc_hypercall_bounce_pre(xch, info);
        if ( rc )
            break;

        rc = xc_hypercall_bounce_pre(xch, name);
        if ( rc )
            break;

        rc = xc_hypercall_bounce_pre(xch, len);
        if ( rc )
            break;

        set_xen_guest_handle(sysctl.u.livepatch.u.list.status, info);
        set_xen_guest_handle(sysctl.u.livepatch.u.list.name, name);
        set_xen_guest_handle(sysctl.u.livepatch.u.list.len, len);

        rc = do_sysctl(xch, &sysctl);
        /*
         * From here on we MUST call xc_hypercall_bounce_post. If rc < 0 we
         * end up doing it (outside the loop), so using a break is OK.
         */
        if ( rc < 0 && errno == E2BIG )
        {
            if ( max_batch_sz <= 1 )
                break;
            max_batch_sz >>= 1;
            adjust = 1; /* For the loop conditional to let us loop again. */
            /* No memory leaks! */
            xc_hypercall_bounce_post(xch, info);
            xc_hypercall_bounce_post(xch, name);
            xc_hypercall_bounce_post(xch, len);
            continue;
        }
        else if ( rc < 0 ) /* For all other errors we bail out. */
            break;

        if ( !version )
            version = sysctl.u.livepatch.u.list.version;

        if ( sysctl.u.livepatch.u.list.version != version )
        {
            /* We could make this configurable as a parameter. */
            if ( retries++ > 3 )
            {
                rc = -1;
                errno = EBUSY;
                break;
            }
            *done = 0; /* Retry from scratch. */
            version = sysctl.u.livepatch.u.list.version;
            adjust = 1; /* And make sure we continue in the loop. */
            /* No memory leaks. */
            xc_hypercall_bounce_post(xch, info);
            xc_hypercall_bounce_post(xch, name);
            xc_hypercall_bounce_post(xch, len);
            continue;
        }

        /* We should never hit this, but just in case. */
        if ( rc > nr )
        {
            errno = EOVERFLOW; /* Overflow! */
            rc = -1;
            break;
        }

        *left = sysctl.u.livepatch.u.list.nr; /* Total remaining count. */
        /* Copy only up to 'rc' entries of data - we could use min(rc, nr) if desired. */
        HYPERCALL_BOUNCE_SET_SIZE(info, (rc * sizeof(*info)));
        HYPERCALL_BOUNCE_SET_SIZE(name, (rc * sz));
        HYPERCALL_BOUNCE_SET_SIZE(len, (rc * sizeof(*len)));

        /* Bounce the data and free the bounce buffer. */
        xc_hypercall_bounce_post(xch, info);
        xc_hypercall_bounce_post(xch, name);
        xc_hypercall_bounce_post(xch, len);

        /* And update how many elements of info we have copied into. */
        *done += rc;
        /* Update idx. */
        sysctl.u.livepatch.u.list.idx = *done;

    } while ( adjust || (*done < max && *left != 0) );

    if ( rc < 0 )
    {
        /* If we broke out on error, the bounce buffers have not been freed yet. */
        xc_hypercall_bounce_post(xch, info);
        xc_hypercall_bounce_post(xch, name);
        xc_hypercall_bounce_post(xch, len);
    }

    return rc > 0 ? 0 : rc;
}
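/*
 * Illustrative caller for xc_livepatch_list(), following the cursor
 * protocol described above: call repeatedly, advancing 'start' by 'done'
 * until 'left' reaches zero.  This is a minimal sketch, not the
 * xen-livepatch tool's actual code; MAX_LEN and the printing are
 * assumptions, and <stdio.h> plus xenctrl.h are taken to be included.
 */
#define MAX_LEN 64

static int list_payloads(xc_interface *xch)
{
    xen_livepatch_status_t info[MAX_LEN];
    char name[MAX_LEN * XEN_LIVEPATCH_NAME_SIZE];
    uint32_t len[MAX_LEN];
    unsigned int idx = 0, done = 0, left = 0, i;
    int rc;

    do {
        rc = xc_livepatch_list(xch, MAX_LEN, idx, info, name, len,
                               &done, &left);
        if ( rc )
            return rc;

        for ( i = 0; i < done; i++ )
            /* Each name entry occupies XEN_LIVEPATCH_NAME_SIZE bytes. */
            printf("%-40.*s state=%u rc=%d\n",
                   (int)len[i], name + i * XEN_LIVEPATCH_NAME_SIZE,
                   info[i].state, info[i].rc);

        idx += done; /* Advance the cursor by the number of entries copied. */
    } while ( left );

    return 0;
}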
static int xc_resource_op_multi(xc_interface *xch, uint32_t nr_ops,
                                xc_resource_op_t *ops)
{
    int rc, i, entries_size;
    xc_resource_op_t *op;
    multicall_entry_t *call;
    DECLARE_HYPERCALL_BUFFER(multicall_entry_t, call_list);
    xc_hypercall_buffer_array_t *platform_ops, *entries_list = NULL;

    call_list = xc_hypercall_buffer_alloc(xch, call_list,
                                          sizeof(*call_list) * nr_ops);
    if ( !call_list )
        return -1;

    platform_ops = xc_hypercall_buffer_array_create(xch, nr_ops);
    if ( !platform_ops )
    {
        rc = -1;
        goto out;
    }

    entries_list = xc_hypercall_buffer_array_create(xch, nr_ops);
    if ( !entries_list )
    {
        rc = -1;
        goto out;
    }

    for ( i = 0; i < nr_ops; i++ )
    {
        DECLARE_HYPERCALL_BUFFER(xen_platform_op_t, platform_op);
        DECLARE_HYPERCALL_BUFFER(xc_resource_entry_t, entries);

        op = ops + i;

        platform_op = xc_hypercall_buffer_array_alloc(xch, platform_ops, i,
                          platform_op, sizeof(xen_platform_op_t));
        if ( !platform_op )
        {
            rc = -1;
            goto out;
        }

        entries_size = sizeof(xc_resource_entry_t) * op->nr_entries;
        entries = xc_hypercall_buffer_array_alloc(xch, entries_list, i,
                          entries, entries_size);
        if ( !entries )
        {
            rc = -1;
            goto out;
        }
        memcpy(entries, op->entries, entries_size);

        call = call_list + i;
        call->op = __HYPERVISOR_platform_op;
        call->args[0] = HYPERCALL_BUFFER_AS_ARG(platform_op);

        platform_op->interface_version = XENPF_INTERFACE_VERSION;
        platform_op->cmd = XENPF_resource_op;
        platform_op->u.resource_op.cpu = op->cpu;
        platform_op->u.resource_op.nr_entries = op->nr_entries;
        set_xen_guest_handle(platform_op->u.resource_op.entries, entries);
    }

    rc = do_multicall_op(xch, HYPERCALL_BUFFER(call_list), nr_ops);

    for ( i = 0; i < nr_ops; i++ )
    {
        DECLARE_HYPERCALL_BUFFER(xc_resource_entry_t, entries);

        op = ops + i;

        call = call_list + i;
        op->result = call->result;

        entries_size = sizeof(xc_resource_entry_t) * op->nr_entries;
        entries = xc_hypercall_buffer_array_get(xch, entries_list, i,
                          entries, entries_size);
        memcpy(op->entries, entries, entries_size);
    }

out:
    xc_hypercall_buffer_array_destroy(xch, entries_list);
    xc_hypercall_buffer_array_destroy(xch, platform_ops);
    xc_hypercall_buffer_free(xch, call_list);
    return rc;
}
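/*
 * Sketch of how a public entry point could dispatch to the helper above:
 * a single operation can take a simpler single-platform-op path, while
 * several operations are batched into one multicall.  The single-op
 * helper xc_resource_op_one() referenced here is an assumption standing
 * in for whatever non-multicall path the file provides.
 */
int xc_resource_op(xc_interface *xch, uint32_t nr_ops, xc_resource_op_t *ops)
{
    if ( nr_ops == 1 )
        return xc_resource_op_one(xch, ops);

    if ( nr_ops > 1 )
        return xc_resource_op_multi(xch, nr_ops, ops);

    errno = EINVAL;
    return -1;
}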
int xc_version(xc_interface *xch, int cmd, void *arg)
{
    DECLARE_HYPERCALL_BOUNCE(arg, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT); /* Size unknown until cmd decoded */
    size_t sz;
    int rc;

    switch ( cmd )
    {
    case XENVER_version:
        sz = 0;
        break;
    case XENVER_extraversion:
        sz = sizeof(xen_extraversion_t);
        break;
    case XENVER_compile_info:
        sz = sizeof(xen_compile_info_t);
        break;
    case XENVER_capabilities:
        sz = sizeof(xen_capabilities_info_t);
        break;
    case XENVER_changeset:
        sz = sizeof(xen_changeset_info_t);
        break;
    case XENVER_platform_parameters:
        sz = sizeof(xen_platform_parameters_t);
        break;
    case XENVER_get_features:
        sz = sizeof(xen_feature_info_t);
        break;
    case XENVER_pagesize:
        sz = 0;
        break;
    case XENVER_guest_handle:
        sz = sizeof(xen_domain_handle_t);
        break;
    case XENVER_commandline:
        sz = sizeof(xen_commandline_t);
        break;
    case XENVER_build_id:
    {
        xen_build_id_t *build_id = (xen_build_id_t *)arg;

        sz = sizeof(*build_id) + build_id->len;
        HYPERCALL_BOUNCE_SET_DIR(arg, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
        break;
    }
    default:
        ERROR("xc_version: unknown command %d\n", cmd);
        return -EINVAL;
    }

    HYPERCALL_BOUNCE_SET_SIZE(arg, sz);

    if ( (sz != 0) && xc_hypercall_bounce_pre(xch, arg) )
    {
        PERROR("Could not bounce buffer for version hypercall");
        return -ENOMEM;
    }

    rc = do_xen_version(xch, cmd, HYPERCALL_BUFFER(arg));

    if ( sz != 0 )
        xc_hypercall_bounce_post(xch, arg);

    return rc;
}
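/*
 * Minimal usage sketch for xc_version().  XENVER_version encodes the
 * major/minor numbers in its return value, while string-style commands
 * such as XENVER_extraversion fill the caller-supplied buffer.  The
 * printing and error handling here are illustrative only, and <stdio.h>
 * is assumed to be available.
 */
static void print_xen_version(xc_interface *xch)
{
    xen_extraversion_t extra;
    int ver = xc_version(xch, XENVER_version, NULL);

    if ( ver < 0 )
        return;

    if ( xc_version(xch, XENVER_extraversion, &extra) < 0 )
        return;

    /* Major in the upper 16 bits, minor in the lower 16 bits. */
    printf("Xen %d.%d%s\n", ver >> 16, ver & 0xffff, extra);
}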