/*
 * Fetch information about the cpupool identified by @poolid.
 *
 * Returns a freshly calloc()ed xc_cpupoolinfo_t with its cpumap filled
 * from the hypervisor, or NULL on any failure (hypercall-buffer
 * allocation, sysctl, or heap allocation).  Ownership of the returned
 * struct and its embedded cpumap passes to the caller.
 */
xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch, uint32_t poolid)
{
    xc_cpupoolinfo_t *info = NULL;
    int mapsize;
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BUFFER(uint8_t, cpubuf);

    mapsize = xc_get_cpumap_size(xch);
    if ( mapsize <= 0 )
    {
        PERROR("Could not get number of cpus");
        return NULL;
    }

    /* Bounce buffer the hypervisor can write the pool's cpumap into. */
    cpubuf = xc_hypercall_buffer_alloc(xch, cpubuf, mapsize);
    if ( cpubuf == NULL )
    {
        PERROR("Could not allocate locked memory for xc_cpupool_getinfo");
        return NULL;
    }

    sysctl.cmd = XEN_SYSCTL_cpupool_op;
    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
    sysctl.u.cpupool_op.cpupool_id = poolid;
    set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, cpubuf);
    sysctl.u.cpupool_op.cpumap.nr_bits = mapsize * 8;

    if ( do_sysctl_save(xch, &sysctl) < 0 )
        goto out;

    info = calloc(1, sizeof(*info));
    if ( !info )
        goto out;

    info->cpumap = xc_cpumap_alloc(xch);
    if ( !info->cpumap )
    {
        /* Partial allocation: give back the struct and report failure. */
        free(info);
        info = NULL;
        goto out;
    }

    info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
    info->sched_id = sysctl.u.cpupool_op.sched_id;
    info->n_dom = sysctl.u.cpupool_op.n_dom;
    memcpy(info->cpumap, cpubuf, mapsize);

out:
    xc_hypercall_buffer_free(xch, cpubuf);

    return info;
}
static void configure_vcpus(struct flags f){ struct xen_domctl_sched_credit sdom; int i, j, r, size, pcpus_supplied, min; xc_cpumap_t cpumap; size = xc_get_cpumap_size(xch) * 8; /* array is of uint8_t */ for (i=0; i<f.vcpus; i++){ if (f.vcpu_affinity[i]){ /* NULL means unset */ pcpus_supplied = strlen(f.vcpu_affinity[i]); min = (pcpus_supplied < size)?pcpus_supplied:size; cpumap = xc_cpumap_alloc(xch); if (cpumap == NULL) failwith_oss_xc("xc_cpumap_alloc"); for (j=0; j<min; j++) { if (f.vcpu_affinity[i][j] == '1') cpumap[j/8] |= 1 << (j&7); } r = xc_vcpu_setaffinity(xch, domid, i, cpumap, NULL, XEN_VCPUAFFINITY_HARD); free(cpumap); if (r) { failwith_oss_xc("xc_vcpu_setaffinity"); } } } r = xc_sched_credit_domain_get(xch, domid, &sdom); /* This should only happen when a different scheduler is set */ if (r) { xg_info("Failed to get credit scheduler parameters: scheduler not enabled?\n"); return; } if (f.vcpu_weight != 0L) sdom.weight = f.vcpu_weight; if (f.vcpu_cap != 0L) sdom.cap = f.vcpu_cap; /* This shouldn't fail, if "get" above succeeds. This error is fatal to highlight the need to investigate further. */ r = xc_sched_credit_domain_set(xch, domid, &sdom); if (r) failwith_oss_xc("xc_sched_credit_domain_set"); }
/*
 * Ask the hypervisor which cpus are not assigned to any cpupool.
 *
 * Returns a heap-allocated cpumap of the free cpus (caller must
 * free() it), or NULL on failure.
 */
xc_cpumap_t xc_cpupool_freeinfo(xc_interface *xch)
{
    xc_cpumap_t result = NULL;
    int nr_bytes;
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BUFFER(uint8_t, hbuf);

    nr_bytes = xc_get_cpumap_size(xch);
    if ( nr_bytes <= 0 )
        return NULL;

    /* Bounce buffer for the hypervisor to write the free-cpu map into. */
    hbuf = xc_hypercall_buffer_alloc(xch, hbuf, nr_bytes);
    if ( hbuf == NULL )
    {
        PERROR("Could not allocate locked memory for xc_cpupool_freeinfo");
        return NULL;
    }

    sysctl.cmd = XEN_SYSCTL_cpupool_op;
    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
    set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, hbuf);
    sysctl.u.cpupool_op.cpumap.nr_bits = nr_bytes * 8;

    if ( do_sysctl_save(xch, &sysctl) < 0 )
        goto out;

    result = xc_cpumap_alloc(xch);
    if ( result == NULL )
        goto out;

    memcpy(result, hbuf, nr_bytes);

out:
    xc_hypercall_buffer_free(xch, hbuf);

    return result;
}