/* One-time initializer: lazily allocate the global ID-tracking bitmaps
 * for macvtap and macvlan devices.  Returns 0 on success, -1 on OOM. */
static int
virNetDevMacVLanOnceInit(void)
{
    if (!macvtapIDs) {
        macvtapIDs = virBitmapNew(MACVLAN_MAX_ID + 1);
        if (!macvtapIDs)
            return -1;
    }

    if (!macvlanIDs) {
        macvlanIDs = virBitmapNew(MACVLAN_MAX_ID + 1);
        if (!macvlanIDs)
            return -1;
    }

    return 0;
}
/**
 * virBitmapNewData:
 * @data: the data
 * @len: length of @data in bytes
 *
 * Allocate a bitmap from a chunk of data containing bits
 * information
 *
 * Returns a pointer to the allocated bitmap or NULL if
 * memory cannot be allocated or if @len is invalid.
 */
virBitmapPtr
virBitmapNewData(const void *data, int len)
{
    virBitmapPtr bitmap;
    size_t i, j;
    unsigned long *p;
    const unsigned char *bytes = data;

    /* Fix: @len is signed — a negative value would become a huge
     * size_t bound in the copy loop below, and len * CHAR_BIT can
     * overflow int before it ever reaches virBitmapNew() */
    if (len < 0 || len > INT_MAX / CHAR_BIT)
        return NULL;

    bitmap = virBitmapNew(len * CHAR_BIT);
    if (!bitmap)
        return NULL;

    /* le64toh is not provided by gnulib, so we do the conversion by hand */
    p = bitmap->map;
    for (i = j = 0; i < (size_t) len; i++, j++) {
        if (j == sizeof(*p)) {
            j = 0;
            p++;
        }
        *p |= (unsigned long) bytes[i] << (j * CHAR_BIT);
    }

    return bitmap;
}
virPortAllocatorPtr virPortAllocatorNew(const char *name, unsigned short start, unsigned short end) { virPortAllocatorPtr pa; if (start >= end) { virReportInvalidArg(start, "start port %d must be less than end port %d", start, end); return NULL; } if (virPortAllocatorInitialize() < 0) return NULL; if (!(pa = virObjectLockableNew(virPortAllocatorClass))) return NULL; pa->start = start; pa->end = end; if (!(pa->bitmap = virBitmapNew((end-start)+1)) || VIR_STRDUP(pa->name, name) < 0) { virObjectUnref(pa); return NULL; } return pa; }
/**
 * virProcessGetAffinity:
 * @pid: process whose scheduler CPU affinity to query
 *
 * Returns a freshly allocated bitmap with one bit set per CPU the
 * process is allowed to run on, or NULL on failure (error already
 * reported).  Caller frees the result with virBitmapFree().
 */
virBitmapPtr
virProcessGetAffinity(pid_t pid)
{
    size_t i;
    cpu_set_t *mask;
    size_t masklen;
    size_t ncpus;
    virBitmapPtr ret = NULL;

# ifdef CPU_ALLOC
    /* 262144 cpus ought to be enough for anyone */
    ncpus = 1024 << 8;
    masklen = CPU_ALLOC_SIZE(ncpus);
    mask = CPU_ALLOC(ncpus);

    if (!mask) {
        virReportOOMError();
        return NULL;
    }

    CPU_ZERO_S(masklen, mask);
# else
    /* Legacy fixed-size cpu_set_t only covers 1024 CPUs */
    ncpus = 1024;
    if (VIR_ALLOC(mask) < 0)
        return NULL;

    masklen = sizeof(*mask);
    CPU_ZERO(mask);
# endif

    if (sched_getaffinity(pid, masklen, mask) < 0) {
        virReportSystemError(errno,
                             _("cannot get CPU affinity of process %d"), pid);
        goto cleanup;
    }

    if (!(ret = virBitmapNew(ncpus)))
        goto cleanup;

    /* Mirror the kernel cpu mask into the returned bitmap */
    for (i = 0; i < ncpus; i++) {
# ifdef CPU_ALLOC
        /* coverity[overrun-local] */
        if (CPU_ISSET_S(i, masklen, mask))
            ignore_value(virBitmapSetBit(ret, i));
# else
        if (CPU_ISSET(i, mask))
            ignore_value(virBitmapSetBit(ret, i));
# endif
    }

 cleanup:
# ifdef CPU_ALLOC
    CPU_FREE(mask);
# else
    VIR_FREE(mask);
# endif

    return ret;
}
/**
 * virNumaGetNodeCPUs:
 * @node: NUMA node id to query
 * @cpus: filled with a bitmap of CPUs belonging to @node
 *
 * Returns the number of CPUs in the node on success; -1 on fatal
 * error (with *@cpus set to NULL); -2 when libnuma reports the node's
 * topology as unavailable or invalid, in which case the caller should
 * skip the node.
 */
int
virNumaGetNodeCPUs(int node, virBitmapPtr *cpus)
{
    unsigned long *mask = NULL;
    unsigned long *allonesmask = NULL;
    virBitmapPtr cpumap = NULL;
    int ncpus = 0;
    int max_n_cpus = virNumaGetMaxCPUs();
    /* NOTE(review): the element-count divisions below truncate unless
     * max_n_cpus is a multiple of 8 * sizeof(unsigned long) — confirm
     * virNumaGetMaxCPUs() guarantees that */
    int mask_n_bytes = max_n_cpus / 8;
    size_t i;
    int ret = -1;

    *cpus = NULL;

    if (VIR_ALLOC_N(mask, mask_n_bytes / sizeof(*mask)) < 0)
        goto cleanup;

    if (VIR_ALLOC_N(allonesmask, mask_n_bytes / sizeof(*mask)) < 0)
        goto cleanup;

    memset(allonesmask, 0xff, mask_n_bytes);

    /* The first time this returns -1, ENOENT if node doesn't exist... */
    if (numa_node_to_cpus(node, mask, mask_n_bytes) < 0) {
        VIR_WARN("NUMA topology for cell %d is not available, ignoring",
                 node);
        ret = -2;
        goto cleanup;
    }

    /* second, third... times it returns an all-1's mask */
    if (memcmp(mask, allonesmask, mask_n_bytes) == 0) {
        VIR_DEBUG("NUMA topology for cell %d is invalid, ignoring", node);
        ret = -2;
        goto cleanup;
    }

    if (!(cpumap = virBitmapNew(max_n_cpus)))
        goto cleanup;

    /* Convert the raw kernel mask into a virBitmap, counting CPUs */
    for (i = 0; i < max_n_cpus; i++) {
        if (MASK_CPU_ISSET(mask, i)) {
            ignore_value(virBitmapSetBit(cpumap, i));
            ncpus++;
        }
    }

    /* Transfer bitmap ownership to the caller */
    *cpus = cpumap;
    cpumap = NULL;
    ret = ncpus;

 cleanup:
    VIR_FREE(mask);
    VIR_FREE(allonesmask);
    virBitmapFree(cpumap);

    return ret;
}
/* Stub affinity query for platforms without a real implementation:
 * report that the process may run on every CPU up to @maxcpu.
 * Returns 0 on success, -1 on allocation failure (reported). */
int
virProcessGetAffinity(pid_t pid ATTRIBUTE_UNUSED,
                      virBitmapPtr *map,
                      int maxcpu)
{
    *map = virBitmapNew(maxcpu);
    if (*map == NULL) {
        virReportOOMError();
        return -1;
    }

    virBitmapSetAll(*map);

    return 0;
}
/**
 * virBitmapNewCopy:
 * @src: the source bitmap.
 *
 * Makes a copy of bitmap @src.
 *
 * returns the copied bitmap on success, or NULL otherwise. Caller
 * should call virBitmapFree to free the returned bitmap.
 */
virBitmapPtr
virBitmapNewCopy(virBitmapPtr src)
{
    virBitmapPtr dst = virBitmapNew(src->max_bit);

    if (dst == NULL)
        return NULL;

    if (virBitmapCopy(dst, src) != 0) {
        virBitmapFree(dst);
        dst = NULL;
    }

    return dst;
}
/* Allocate a new port allocator object covering the full
 * VIR_PORT_ALLOCATOR_NUM_PORTS range.  Returns NULL on failure. */
static virPortAllocatorPtr
virPortAllocatorNew(void)
{
    virPortAllocatorPtr pa = virObjectLockableNew(virPortAllocatorClass);

    if (!pa)
        return NULL;

    pa->bitmap = virBitmapNew(VIR_PORT_ALLOCATOR_NUM_PORTS);
    if (!pa->bitmap) {
        virObjectUnref(pa);
        return NULL;
    }

    return pa;
}
/*
 * Build NUMA Toplogy with cell id starting from (0 + seq)
 * for testing
 *
 * Creates MAX_CELLS cells, each holding MAX_CPUS_IN_CELL CPUs and
 * MAX_MEM_IN_CELL memory.  Returns a caps object (caller unrefs) or
 * NULL on failure.
 */
static virCapsPtr
buildNUMATopology(int seq)
{
    virCapsPtr caps;
    virCapsHostNUMACellCPUPtr cell_cpus = NULL;
    int core_id, cell_id;
    int id;

    if ((caps = virCapabilitiesNew(VIR_ARCH_X86_64, false, false)) == NULL)
        goto error;

    id = 0;
    for (cell_id = 0; cell_id < MAX_CELLS; cell_id++) {
        if (VIR_ALLOC_N(cell_cpus, MAX_CPUS_IN_CELL) < 0)
            goto error;

        for (core_id = 0; core_id < MAX_CPUS_IN_CELL; core_id++) {
            cell_cpus[core_id].id = id + core_id;
            cell_cpus[core_id].socket_id = cell_id + seq;
            cell_cpus[core_id].core_id = id + core_id;
            if (!(cell_cpus[core_id].siblings =
                  virBitmapNew(MAX_CPUS_IN_CELL)))
                goto error;
            /* NOTE(review): only bit @id is set here (not id + core_id),
             * and @id advances once per cell rather than once per CPU —
             * looks intentional for this test fixture, but confirm */
            ignore_value(virBitmapSetBit(cell_cpus[core_id].siblings, id));
        }
        id++;

        /* Ownership of cell_cpus moves to the caps object on success */
        if (virCapabilitiesAddHostNUMACell(caps, cell_id + seq,
                                           MAX_MEM_IN_CELL,
                                           MAX_CPUS_IN_CELL, cell_cpus,
                                           VIR_ARCH_NONE, NULL,
                                           VIR_ARCH_NONE, NULL) < 0)
            goto error;

        cell_cpus = NULL;
    }

    return caps;

 error:
    virCapabilitiesClearHostNUMACellCPUTopology(cell_cpus, MAX_CPUS_IN_CELL);
    VIR_FREE(cell_cpus);
    virObjectUnref(caps);
    return NULL;
}
/*
 * To be run while still single threaded
 *
 * Pin the about-to-exec LXC instance either to the cpumask from its
 * config, or — when none is given — explicitly to all host pCPUs
 * (libvirtd itself may be running on a subset, and affinity is
 * inherited across fork/exec).
 */
static int
virLXCControllerSetupCpuAffinity(virLXCControllerPtr ctrl)
{
    int hostcpus;
    int maxcpu = CPU_SETSIZE;
    virNodeInfo nodeinfo;
    virBitmapPtr cpumap = NULL;
    virBitmapPtr cpumapToSet = NULL;
    int ret = -1;

    VIR_DEBUG("Setting CPU affinity");

    if (nodeGetInfo(NULL, &nodeinfo) < 0)
        return -1;

    /* setaffinity fails if you set bits for CPUs which
     * aren't present, so we have to limit ourselves */
    hostcpus = VIR_NODEINFO_MAXCPUS(nodeinfo);
    if (maxcpu > hostcpus)
        maxcpu = hostcpus;

    if (!(cpumap = virBitmapNew(maxcpu)))
        return -1;

    if (ctrl->def->cpumask) {
        cpumapToSet = ctrl->def->cpumask;
    } else {
        /* You may think this is redundant, but we can't assume libvirtd
         * itself is running on all pCPUs, so we need to explicitly set
         * the spawned LXC instance to all pCPUs if no map is given in
         * its config file */
        virBitmapSetAll(cpumap);
        cpumapToSet = cpumap;
    }

    /* We are presuming we are running between fork/exec of LXC
     * so use '0' to indicate our own process ID. No threads are
     * running at this point */
    if (virProcessInfoSetAffinity(0 /* Self */, cpumapToSet) == 0)
        ret = 0;

    virBitmapFree(cpumap);
    return ret;
}
static dnsmasqCapsPtr dnsmasqCapsNewEmpty(const char *binaryPath) { dnsmasqCapsPtr caps; if (dnsmasqCapsInitialize() < 0) return NULL; if (!(caps = virObjectNew(dnsmasqCapsClass))) return NULL; if (!(caps->flags = virBitmapNew(DNSMASQ_CAPS_LAST))) goto error; if (VIR_STRDUP(caps->binaryPath, binaryPath ? binaryPath : DNSMASQ) < 0) goto error; return caps; error: virObjectUnref(caps); return NULL; }
/* Collect into a single bitmap all host CPUs belonging to any NUMA
 * node set in @nodemask.  Returns a new bitmap (caller frees) or NULL
 * on failure. */
virBitmapPtr
virCapabilitiesGetCpusForNodemask(virCapsPtr caps,
                                  virBitmapPtr nodemask)
{
    unsigned int maxcpu = virCapabilitiesGetHostMaxcpu(caps);
    virBitmapPtr cpus = virBitmapNew(maxcpu + 1);
    ssize_t node;

    if (!cpus)
        return NULL;

    for (node = virBitmapNextSetBit(nodemask, -1);
         node >= 0;
         node = virBitmapNextSetBit(nodemask, node)) {
        if (virCapabilitiesGetCpusForNode(caps, node, cpus) < 0) {
            virBitmapFree(cpus);
            return NULL;
        }
    }

    return cpus;
}
/* virDomainVirtioSerialAddrSetAddController
 *
 * Adds virtio serial ports of the existing controller
 * to the address set.
 *
 * Returns 0 on success (including for non-virtio-serial controllers,
 * which are silently skipped) and -1 on failure.
 */
int
virDomainVirtioSerialAddrSetAddController(virDomainVirtioSerialAddrSetPtr addrs,
                                          virDomainControllerDefPtr cont)
{
    int ret = -1;
    int ports;
    virDomainVirtioSerialControllerPtr cnt = NULL;
    ssize_t insertAt;

    /* Only virtio-serial controllers contribute ports */
    if (cont->type != VIR_DOMAIN_CONTROLLER_TYPE_VIRTIO_SERIAL)
        return 0;

    ports = cont->opts.vioserial.ports;
    if (ports == -1)
        ports = VIR_DOMAIN_DEFAULT_VIRTIO_SERIAL_PORTS;

    VIR_DEBUG("Adding virtio serial controller index %u with %d"
              " ports to the address set", cont->idx, ports);

    if (VIR_ALLOC(cnt) < 0)
        goto cleanup;

    if (!(cnt->ports = virBitmapNew(ports)))
        goto cleanup;
    cnt->idx = cont->idx;

    /* NOTE(review): only values < -1 are treated as failure here,
     * implying -1 is a valid placement result — confirm against
     * virDomainVirtioSerialAddrPlaceController's contract */
    if ((insertAt = virDomainVirtioSerialAddrPlaceController(addrs, cnt)) < -1)
        goto cleanup;

    /* presumably VIR_INSERT_ELEMENT clears @cnt on success so the
     * cleanup free below only fires on failure paths — TODO confirm */
    if (VIR_INSERT_ELEMENT(addrs->controllers, insertAt,
                           addrs->ncontrollers, cnt) < 0)
        goto cleanup;

    ret = 0;

 cleanup:
    virDomainVirtioSerialControllerFree(cnt);
    return ret;
}
/* Query the CPU affinity of @pid (FreeBSD cpuset variant) into a new
 * bitmap of @maxcpu bits stored in *@map.
 * Returns 0 on success; -1 on failure with *@map set to NULL and the
 * error reported. */
int
virProcessGetAffinity(pid_t pid,
                      virBitmapPtr *map,
                      int maxcpu)
{
    size_t i;
    cpuset_t mask;

    if (!(*map = virBitmapNew(maxcpu)))
        return -1;

    CPU_ZERO(&mask);
    if (cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, pid,
                           sizeof(mask), &mask) != 0) {
        virReportSystemError(errno,
                             _("cannot get CPU affinity of process %d"), pid);
        /* Fix: previously the freshly allocated bitmap was leaked and
         * handed back half-initialized on this error path */
        virBitmapFree(*map);
        *map = NULL;
        return -1;
    }

    for (i = 0; i < maxcpu; i++)
        if (CPU_ISSET(i, &mask))
            ignore_value(virBitmapSetBit(*map, i));

    return 0;
}
/* Query the CPU affinity of @pid (FreeBSD cpuset variant) and return
 * it as a new bitmap sized to the full cpuset_t width, or NULL on
 * failure (reported).  Caller frees the bitmap. */
virBitmapPtr
virProcessGetAffinity(pid_t pid)
{
    const size_t nbits = sizeof(cpuset_t) * 8;
    cpuset_t mask;
    virBitmapPtr map = NULL;
    size_t i;

    CPU_ZERO(&mask);
    if (cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, pid,
                           sizeof(mask), &mask) != 0) {
        virReportSystemError(errno,
                             _("cannot get CPU affinity of process %d"), pid);
        return NULL;
    }

    if (!(map = virBitmapNew(nbits)))
        return NULL;

    for (i = 0; i < nbits; i++) {
        if (CPU_ISSET(i, &mask))
            ignore_value(virBitmapSetBit(map, i));
    }

    return map;
}
/*
 * libxlCapsInitNuma:
 *
 * Query Xen via libxl for NUMA and CPU topology and register one host
 * NUMA cell per node in @caps.  Returns 0 on success, -1 on failure
 * (any partially-built per-node CPU lists are freed).
 */
static int
libxlCapsInitNuma(libxl_ctx *ctx, virCapsPtr caps)
{
    libxl_numainfo *numa_info = NULL;
    libxl_cputopology *cpu_topo = NULL;
    int nr_nodes = 0, nr_cpus = 0;
    virCapsHostNUMACellCPUPtr *cpus = NULL;
    int *nr_cpus_node = NULL;
    size_t i;
    int ret = -1;

    /* Let's try to fetch all the topology information */
    numa_info = libxl_get_numainfo(ctx, &nr_nodes);
    if (numa_info == NULL || nr_nodes == 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("libxl_get_numainfo failed"));
        goto cleanup;
    } else {
        cpu_topo = libxl_get_cpu_topology(ctx, &nr_cpus);
        if (cpu_topo == NULL || nr_cpus == 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("libxl_get_cpu_topology failed"));
            goto cleanup;
        }
    }

    if (VIR_ALLOC_N(cpus, nr_nodes) < 0)
        goto cleanup;

    if (VIR_ALLOC_N(nr_cpus_node, nr_nodes) < 0)
        goto cleanup;

    /* For each node, prepare a list of CPUs belonging to that node */
    for (i = 0; i < nr_cpus; i++) {
        int node = cpu_topo[i].node;

        if (cpu_topo[i].core == LIBXL_CPUTOPOLOGY_INVALID_ENTRY)
            continue;

        nr_cpus_node[node]++;

        /* Grow the per-node CPU array one element at a time */
        if (nr_cpus_node[node] == 1) {
            if (VIR_ALLOC(cpus[node]) < 0)
                goto cleanup;
        } else {
            if (VIR_REALLOC_N(cpus[node], nr_cpus_node[node]) < 0)
                goto cleanup;
        }

        /* Mapping between what libxl tells and what libvirt wants */
        cpus[node][nr_cpus_node[node]-1].id = i;
        cpus[node][nr_cpus_node[node]-1].socket_id = cpu_topo[i].socket;
        cpus[node][nr_cpus_node[node]-1].core_id = cpu_topo[i].core;
        /* Allocate the siblings maps. We will be filling them later */
        cpus[node][nr_cpus_node[node]-1].siblings = virBitmapNew(nr_cpus);
        if (!cpus[node][nr_cpus_node[node]-1].siblings) {
            virReportOOMError();
            goto cleanup;
        }
    }

    /* Let's now populate the siblings bitmaps */
    for (i = 0; i < nr_cpus; i++) {
        int node = cpu_topo[i].node;
        size_t j;

        if (cpu_topo[i].core == LIBXL_CPUTOPOLOGY_INVALID_ENTRY)
            continue;

        /* CPUs sharing socket+core within a node are thread siblings */
        for (j = 0; j < nr_cpus_node[node]; j++) {
            if (cpus[node][j].socket_id == cpu_topo[i].socket &&
                cpus[node][j].core_id == cpu_topo[i].core)
                ignore_value(virBitmapSetBit(cpus[node][j].siblings, i));
        }
    }

    for (i = 0; i < nr_nodes; i++) {
        if (numa_info[i].size == LIBXL_NUMAINFO_INVALID_ENTRY)
            continue;

        /* NOTE(review): numa_info[i].size is presumably bytes, passed
         * on as KiB here — confirm against the libxl documentation */
        if (virCapabilitiesAddHostNUMACell(caps, i, nr_cpus_node[i],
                                           numa_info[i].size / 1024,
                                           cpus[i]) < 0) {
            virCapabilitiesClearHostNUMACellCPUTopology(cpus[i],
                                                        nr_cpus_node[i]);
            goto cleanup;
        }

        /* This is safe, as the CPU list is now stored in the NUMA cell */
        cpus[i] = NULL;
    }

    ret = 0;

 cleanup:
    if (ret != 0) {
        for (i = 0; cpus && i < nr_nodes; i++)
            VIR_FREE(cpus[i]);
        virCapabilitiesFreeNUMAInfo(caps);
    }

    VIR_FREE(cpus);
    VIR_FREE(nr_cpus_node);
    libxl_cputopology_list_free(cpu_topo, nr_cpus);
    libxl_numainfo_list_free(numa_info, nr_nodes);

    return ret;
}
/* Query the CPU affinity of @pid (Linux sched_getaffinity variant)
 * into a new bitmap of @maxcpu bits stored in *@map.
 * Returns 0 on success, -1 on failure (reported). */
int
virProcessGetAffinity(pid_t pid,
                      virBitmapPtr *map,
                      int maxcpu)
{
    size_t i;

    *map = NULL;

# ifdef CPU_ALLOC
    /* New method dynamically allocates cpu mask, allowing unlimted cpus */
    int numcpus = 1024;
    size_t masklen;
    cpu_set_t *mask;

    /* Not only may the statically allocated cpu_set_t be too small,
     * but there is no way to ask the kernel what size is large enough.
     * So you have no option but to pick a size, try, catch EINVAL,
     * enlarge, and re-try.
     *
     * http://lkml.org/lkml/2009/7/28/620
     */
 realloc:
    masklen = CPU_ALLOC_SIZE(numcpus);
    mask = CPU_ALLOC(numcpus);

    if (!mask) {
        virReportOOMError();
        return -1;
    }

    CPU_ZERO_S(masklen, mask);
    if (sched_getaffinity(pid, masklen, mask) < 0) {
        CPU_FREE(mask);
        if (errno == EINVAL &&
            numcpus < (1024 << 8)) { /* 262144 cpus ought to be enough for anyone */
            numcpus = numcpus << 2;
            goto realloc;
        }
        virReportSystemError(errno,
                             _("cannot get CPU affinity of process %d"), pid);
        return -1;
    }

    *map = virBitmapNew(maxcpu);
    if (!*map) {
        /* Fix: the dynamically allocated cpu mask was leaked here */
        CPU_FREE(mask);
        return -1;
    }

    for (i = 0; i < maxcpu; i++)
        if (CPU_ISSET_S(i, masklen, mask))
            ignore_value(virBitmapSetBit(*map, i));
    CPU_FREE(mask);
# else
    /* Legacy method uses a fixed size cpu mask, only allows up to 1024 cpus */
    cpu_set_t mask;

    CPU_ZERO(&mask);
    if (sched_getaffinity(pid, sizeof(mask), &mask) < 0) {
        virReportSystemError(errno,
                             _("cannot get CPU affinity of process %d"), pid);
        return -1;
    }

    /* Fix: this branch previously set bits in *map without ever
     * allocating the bitmap */
    if (!(*map = virBitmapNew(maxcpu)))
        return -1;

    for (i = 0; i < maxcpu; i++)
        if (CPU_ISSET(i, &mask))
            ignore_value(virBitmapSetBit(*map, i));
# endif

    return 0;
}
/* Build a synthetic capabilities object for testing: @max_cells NUMA
 * cells, each with @max_cpus_in_cell CPUs, @max_mem_in_cell memory,
 * and a full inter-cell distance table.  Returns NULL on failure. */
static virCapsPtr
buildVirCapabilities(int max_cells,
                     int max_cpus_in_cell,
                     int max_mem_in_cell)
{
    virCapsPtr caps;
    virCapsHostNUMACellCPUPtr cell_cpus = NULL;
    virCapsHostNUMACellSiblingInfoPtr siblings = NULL;
    int core_id, cell_id, nsiblings;
    int id;
    size_t i;

    if ((caps = virCapabilitiesNew(VIR_ARCH_X86_64, 0, 0)) == NULL)
        goto error;

    id = 0;
    for (cell_id = 0; cell_id < max_cells; cell_id++) {
        if (VIR_ALLOC_N(cell_cpus, max_cpus_in_cell) < 0)
            goto error;

        for (core_id = 0; core_id < max_cpus_in_cell; core_id++) {
            cell_cpus[core_id].id = id;
            cell_cpus[core_id].socket_id = cell_id;
            cell_cpus[core_id].core_id = id + core_id;
            if (!(cell_cpus[core_id].siblings =
                  virBitmapNew(max_cpus_in_cell)))
                goto error;
            /* NOTE(review): @id advances once per cell, so every CPU in
             * a cell shares the same .id and sibling bit — looks
             * deliberate for this fixture, but confirm */
            ignore_value(virBitmapSetBit(cell_cpus[core_id].siblings, id));
        }
        id++;

        if (VIR_ALLOC_N(siblings, max_cells) < 0)
            goto error;
        nsiblings = max_cells;

        for (i = 0; i < nsiblings; i++) {
            siblings[i].node = i;
            /* Some magical constants, see virNumaGetDistances()
             * for their description. */
            siblings[i].distance = cell_id == i ? 10 : 20;
        }

        /* Ownership of cell_cpus/siblings moves to caps on success */
        if (virCapabilitiesAddHostNUMACell(caps, cell_id,
                                           max_mem_in_cell,
                                           max_cpus_in_cell, cell_cpus,
                                           nsiblings, siblings,
                                           0, NULL) < 0)
            goto error;

        cell_cpus = NULL;
        siblings = NULL;
    }

    return caps;

 error:
    virCapabilitiesClearHostNUMACellCPUTopology(cell_cpus, max_cpus_in_cell);
    VIR_FREE(cell_cpus);
    VIR_FREE(siblings);
    virObjectUnref(caps);
    return NULL;
}
/* Parse the sysfs directory of one NUMA node and derive socket, core,
 * thread and offline-CPU counts from it.
 *
 * NOTE(review): the return type is not visible in this extract — it is
 * presumably `static int`; confirm against the declaration.
 *
 * Returns the number of processors in the node (>= 0) on success,
 * filling @sockets, @cores, @threads and incrementing @offline;
 * returns -1 on error.
 */
virHostCPUParseNode(const char *node,
                    virArch arch,
                    virBitmapPtr present_cpus_map,
                    virBitmapPtr online_cpus_map,
                    int threads_per_subcore,
                    int *sockets,
                    int *cores,
                    int *threads,
                    int *offline)
{
    /* Biggest value we can expect to be used as either socket id
     * or core id. Bitmaps will need to be sized accordingly */
    const int ID_MAX = 4095;
    int ret = -1;
    int processors = 0;
    DIR *cpudir = NULL;
    struct dirent *cpudirent = NULL;
    virBitmapPtr node_cpus_map = NULL;
    virBitmapPtr sockets_map = NULL;
    virBitmapPtr *cores_maps = NULL;
    int npresent_cpus = virBitmapSize(present_cpus_map);
    int sock_max = 0;
    int sock;
    int core;
    size_t i;
    int siblings;
    unsigned int cpu;
    int direrr;

    *threads = 0;
    *cores = 0;
    *sockets = 0;

    if (!(cpudir = opendir(node))) {
        virReportSystemError(errno, _("cannot opendir %s"), node);
        goto cleanup;
    }

    /* Keep track of the CPUs that belong to the current node */
    if (!(node_cpus_map = virBitmapNew(npresent_cpus)))
        goto cleanup;

    /* enumerate sockets in the node */
    if (!(sockets_map = virBitmapNew(ID_MAX + 1)))
        goto cleanup;

    while ((direrr = virDirRead(cpudir, &cpudirent, node)) > 0) {
        if (sscanf(cpudirent->d_name, "cpu%u", &cpu) != 1)
            continue;

        if (!virBitmapIsBitSet(present_cpus_map, cpu))
            continue;

        /* Mark this CPU as part of the current node */
        if (virBitmapSetBit(node_cpus_map, cpu) < 0)
            goto cleanup;

        if (!virBitmapIsBitSet(online_cpus_map, cpu))
            continue;

        /* Parse socket */
        if ((sock = virHostCPUParseSocket(node, arch, cpu)) < 0)
            goto cleanup;
        if (sock > ID_MAX) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("Socket %d can't be handled (max socket is %d)"),
                           sock, ID_MAX);
            goto cleanup;
        }

        if (virBitmapSetBit(sockets_map, sock) < 0)
            goto cleanup;

        if (sock > sock_max)
            sock_max = sock;
    }

    if (direrr < 0)
        goto cleanup;

    sock_max++;

    /* allocate cores maps for each socket */
    if (VIR_ALLOC_N(cores_maps, sock_max) < 0)
        goto cleanup;

    for (i = 0; i < sock_max; i++)
        if (!(cores_maps[i] = virBitmapNew(ID_MAX + 1)))
            goto cleanup;

    /* Iterate over all CPUs in the node, in ascending order */
    for (cpu = 0; cpu < npresent_cpus; cpu++) {

        /* Skip CPUs that are not part of the current node */
        if (!virBitmapIsBitSet(node_cpus_map, cpu))
            continue;

        if (!virBitmapIsBitSet(online_cpus_map, cpu)) {
            if (threads_per_subcore > 0 &&
                cpu % threads_per_subcore != 0 &&
                virBitmapIsBitSet(online_cpus_map,
                                  cpu - (cpu % threads_per_subcore))) {
                /* Secondary offline threads are counted as online when
                 * subcores are in use and the corresponding primary
                 * thread is online */
                processors++;
            } else {
                /* But they are counted as offline otherwise */
                (*offline)++;
            }
            continue;
        }

        processors++;

        /* Parse socket */
        if ((sock = virHostCPUParseSocket(node, arch, cpu)) < 0)
            goto cleanup;
        if (!virBitmapIsBitSet(sockets_map, sock)) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("CPU socket topology has changed"));
            goto cleanup;
        }

        /* Parse core */
        if (ARCH_IS_S390(arch)) {
            /* logical cpu is equivalent to a core on s390 */
            core = cpu;
        } else {
            if ((core = virHostCPUGetValue(node, cpu,
                                           "topology/core_id", 0)) < 0)
                goto cleanup;
        }
        if (core > ID_MAX) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("Core %d can't be handled (max core is %d)"),
                           core, ID_MAX);
            goto cleanup;
        }

        if (virBitmapSetBit(cores_maps[sock], core) < 0)
            goto cleanup;

        /* NOTE(review): a sibling count of 0 is treated as an error
         * here — confirm virHostCPUCountThreadSiblings() cannot
         * legitimately return 0 */
        if (!(siblings = virHostCPUCountThreadSiblings(node, cpu)))
            goto cleanup;

        if (siblings > *threads)
            *threads = siblings;
    }

    /* finalize the returned data */
    *sockets = virBitmapCountBits(sockets_map);

    for (i = 0; i < sock_max; i++) {
        if (!virBitmapIsBitSet(sockets_map, i))
            continue;

        /* The node's core count is that of its largest socket */
        core = virBitmapCountBits(cores_maps[i]);
        if (core > *cores)
            *cores = core;
    }

    if (threads_per_subcore > 0) {
        /* The thread count ignores offline threads, which means that only
         * only primary threads have been considered so far. If subcores
         * are in use, we need to also account for secondary threads */
        *threads *= threads_per_subcore;
    }

    ret = processors;

 cleanup:
    /* don't shadow a more serious error */
    if (cpudir && closedir(cpudir) < 0 && ret >= 0) {
        virReportSystemError(errno, _("problem closing %s"), node);
        ret = -1;
    }

    if (cores_maps)
        for (i = 0; i < sock_max; i++)
            virBitmapFree(cores_maps[i]);
    VIR_FREE(cores_maps);
    virBitmapFree(sockets_map);
    virBitmapFree(node_cpus_map);

    return ret;
}
/* Parse a human-readable bitmap string (e.g. "0-3,^1,5") into a newly
 * allocated bitmap of @bitmapSize bits stored in *@bitmap.  Parsing
 * stops at @sep, which allows embedding a bitmap in a larger string.
 * Returns the number of set bits (ranges count only newly-set bits),
 * or -1 on failure with *@bitmap freed and NULLed. */
int
virBitmapParse(const char *str,
               char sep,
               virBitmapPtr *bitmap,
               size_t bitmapSize)
{
    int ret = 0;        /* running count of bits currently set */
    bool neg = false;   /* true while handling a ^N term */
    const char *cur;
    char *tmp;
    int i, start, last;

    if (!str)
        return -1;

    cur = str;
    virSkipSpaces(&cur);

    if (*cur == 0)
        return -1;

    *bitmap = virBitmapNew(bitmapSize);
    if (!*bitmap)
        return -1;

    while (*cur != 0 && *cur != sep) {
        /*
         * 3 constructs are allowed:
         *     - N   : a single CPU number
         *     - N-M : a range of CPU numbers with N < M
         *     - ^N  : remove a single CPU number from the current set
         */
        if (*cur == '^') {
            cur++;
            neg = true;
        }

        if (!c_isdigit(*cur))
            goto parse_error;

        if (virStrToLong_i(cur, &tmp, 10, &start) < 0)
            goto parse_error;
        if (start < 0)
            goto parse_error;

        cur = tmp;

        virSkipSpaces(&cur);

        if (*cur == ',' || *cur == 0 || *cur == sep) {
            /* NOTE(review): @start is never checked against
             * @bitmapSize; out-of-range bits appear to be silently
             * dropped via the ignore_value() wrappers — confirm that
             * is the intended behavior */
            if (neg) {
                if (virBitmapIsSet(*bitmap, start)) {
                    ignore_value(virBitmapClearBit(*bitmap, start));
                    ret--;
                }
            } else {
                if (!virBitmapIsSet(*bitmap, start)) {
                    ignore_value(virBitmapSetBit(*bitmap, start));
                    ret++;
                }
            }
        } else if (*cur == '-') {
            if (neg)
                goto parse_error;

            cur++;
            virSkipSpaces(&cur);

            if (virStrToLong_i(cur, &tmp, 10, &last) < 0)
                goto parse_error;
            if (last < start)
                goto parse_error;

            cur = tmp;

            for (i = start; i <= last; i++) {
                if (!virBitmapIsSet(*bitmap, i)) {
                    ignore_value(virBitmapSetBit(*bitmap, i));
                    ret++;
                }
            }

            virSkipSpaces(&cur);
        }

        if (*cur == ',') {
            cur++;
            virSkipSpaces(&cur);
            neg = false;
        } else if (*cur == 0 || *cur == sep) {
            break;
        } else {
            goto parse_error;
        }
    }

    return ret;

 parse_error:
    virBitmapFree(*bitmap);
    *bitmap = NULL;
    return -1;
}
/**
 * virBitmapParse:
 * @str: points to a string representing a human-readable bitmap
 * @terminator: character separating the bitmap to parse
 * @bitmap: a bitmap created from @str
 * @bitmapSize: the upper limit of num of bits in created bitmap
 *
 * This function is the counterpart of virBitmapFormat. This function creates
 * a bitmap, in which bits are set according to the content of @str.
 *
 * @str is a comma separated string of fields N, which means a number of bit
 * to set, and ^N, which means to unset the bit, and N-M for ranges of bits
 * to set.
 *
 * To allow parsing of bitmaps within larger strings it is possible to set
 * a termination character in the argument @terminator. When the character
 * in @terminator is encountered in @str, the parsing of the bitmap stops.
 * Pass 0 as @terminator if it is not needed. Whitespace characters may not
 * be used as terminators.
 *
 * Returns the number of bits set in @bitmap, or -1 in case of error.
 */
int
virBitmapParse(const char *str,
               char terminator,
               virBitmapPtr *bitmap,
               size_t bitmapSize)
{
    bool neg = false;
    const char *cur = str;
    char *tmp;
    size_t i;
    int start, last;

    /* Fix: reject NULL before the error label can be reached — the
     * error path formats @str with %s, and passing NULL to a
     * printf-style %s is undefined behavior */
    if (!str) {
        virReportError(VIR_ERR_INVALID_ARG, "%s",
                       _("Failed to parse bitmap"));
        *bitmap = NULL;
        return -1;
    }

    if (!(*bitmap = virBitmapNew(bitmapSize)))
        return -1;

    virSkipSpaces(&cur);

    if (*cur == '\0')
        goto error;

    while (*cur != 0 && *cur != terminator) {
        /*
         * 3 constructs are allowed:
         *     - N   : a single CPU number
         *     - N-M : a range of CPU numbers with N < M
         *     - ^N  : remove a single CPU number from the current set
         */
        if (*cur == '^') {
            cur++;
            neg = true;
        }

        if (!c_isdigit(*cur))
            goto error;

        if (virStrToLong_i(cur, &tmp, 10, &start) < 0)
            goto error;
        if (start < 0)
            goto error;

        cur = tmp;

        virSkipSpaces(&cur);

        if (*cur == ',' || *cur == 0 || *cur == terminator) {
            if (neg) {
                if (virBitmapClearBit(*bitmap, start) < 0)
                    goto error;
            } else {
                if (virBitmapSetBit(*bitmap, start) < 0)
                    goto error;
            }
        } else if (*cur == '-') {
            if (neg)
                goto error;

            cur++;
            virSkipSpaces(&cur);

            if (virStrToLong_i(cur, &tmp, 10, &last) < 0)
                goto error;
            if (last < start)
                goto error;

            cur = tmp;

            for (i = start; i <= last; i++) {
                if (virBitmapSetBit(*bitmap, i) < 0)
                    goto error;
            }

            virSkipSpaces(&cur);
        }

        if (*cur == ',') {
            cur++;
            virSkipSpaces(&cur);
            neg = false;
        } else if (*cur == 0 || *cur == terminator) {
            break;
        } else {
            goto error;
        }
    }

    /* An empty result is treated as a parse failure */
    if (virBitmapIsAllClear(*bitmap))
        goto error;

    return virBitmapCountBits(*bitmap);

 error:
    virReportError(VIR_ERR_INVALID_ARG,
                   _("Failed to parse bitmap '%s'"), str);
    virBitmapFree(*bitmap);
    *bitmap = NULL;
    return -1;
}
/*
 * Populate @def from @cont's LXC config file: architecture, CPU
 * pinning/shares/quota, memory limits, NUMA memory nodes and the
 * optional lxc.include metadata, then delegate FS and network config.
 *
 * Returns 0 on success, -1 on failure (error reported where possible).
 */
int
lxctoolsReadConfig(struct lxc_container* cont, virDomainDefPtr def)
{
    char* item_str = NULL;
    virNodeInfoPtr nodeinfo = NULL;
    lxctoolsConffilePtr conffile = NULL;

    if (VIR_ALLOC(nodeinfo) < 0)
        goto error;

    if (virCapabilitiesGetNodeInfo(nodeinfo) < 0)
        goto error;

    if (VIR_ALLOC(conffile) < 0)
        goto error;

    if (lxctoolsConffileRead(conffile, cont->config_file_name(cont)) < 0) {
        virReportError(VIR_ERR_OPERATION_FAILED, "'%s'",
                       _("failed to read conffile"));
        goto error;
    }

    if ((item_str = lxctoolsConffileGetItem(conffile, "lxc.arch")) == NULL)
        goto error;
    if (item_str[0] != '\0') {
        if (strcmp(item_str, "x86") == 0 ||
            strcmp(item_str, "i686") == 0) {
            def->os.arch = VIR_ARCH_I686;
        } else if (strcmp(item_str, "x86_64") == 0 ||
                   strcmp(item_str, "amd64") == 0) {
            def->os.arch = VIR_ARCH_X86_64;
        } else {
            virReportError(VIR_ERR_OPERATION_FAILED,
                           "Unknown architecture '%s'.", item_str);
            goto error;
        }
    }
    VIR_FREE(item_str);
    item_str = NULL;

    if ((item_str = lxctoolsConffileGetItem(conffile,
                                            "lxc.cgroup.cpuset.cpus")) == NULL)
        goto error;
    if (item_str[0] == '\0') {
        /* No pinning configured: allow all host CPUs */
        if (virDomainDefSetVcpusMax(def, nodeinfo->cpus, NULL) < 0)
            goto error;
        /* Fix: the virBitmapNew() result was previously used unchecked */
        if (!(def->cpumask = virBitmapNew(nodeinfo->cpus)))
            goto error;
        virBitmapSetAll(def->cpumask);
    } else {
        int cpunum;
        if ((cpunum = virBitmapParse(item_str, &def->cpumask,
                                     nodeinfo->cpus)) < 0)
            goto error;
        if (virDomainDefSetVcpusMax(def, cpunum, NULL) < 0)
            goto error;
    }
    if (virDomainDefSetVcpus(def, virDomainDefGetVcpusMax(def)) < 0)
        goto error;
    VIR_FREE(item_str);
    item_str = NULL;

    if ((item_str = lxctoolsConffileGetItem(conffile,
                                            "lxc.cgroup.cpu.shares")) == NULL)
        goto error;
    if (item_str[0] != '\0') {
        unsigned long shares;
        /* Fix: only consume the value when sscanf actually parsed it */
        if (sscanf(item_str, "%lu", &shares) == 1) {
            def->cputune.shares = shares;
            def->cputune.sharesSpecified = true;
        }
    }
    VIR_FREE(item_str);
    item_str = NULL;

    if ((item_str = lxctoolsConffileGetItem(conffile,
                                            "lxc.cgroup.cpu.cfs_period_us")) == NULL)
        goto error;
    if (item_str[0] != '\0') {
        unsigned long long period;
        if (sscanf(item_str, "%llu", &period) == 1)
            def->cputune.period = period;
    }
    VIR_FREE(item_str);
    item_str = NULL;

    if ((item_str = lxctoolsConffileGetItem(conffile,
                                            "lxc.cgroup.cpu.cfs_quota_us")) == NULL)
        goto error;
    if (item_str[0] != '\0') {
        long long quota;
        /* Fix: was a corrupted sscanf call with a %llu format for a
         * signed long long; %lld with &quota is the correct pairing */
        if (sscanf(item_str, "%lld", &quota) == 1)
            def->cputune.quota = quota;
    }
    VIR_FREE(item_str);
    item_str = NULL;

    if ((item_str = lxctoolsConffileGetItem(conffile,
                                            "lxc.cgroup.memory.limit_in_bytes")) == NULL)
        goto error;
    if (item_str[0] == '\0') {
        virDomainDefSetMemoryTotal(def, nodeinfo->memory);
    } else {
        virDomainDefSetMemoryTotal(def, memToULL(item_str));
    }
    def->mem.cur_balloon = virDomainDefGetMemoryTotal(def);
//    def->mem.max_memory = nodeinfo->memory;
//    def->mem.memory_slots = 1;
    //maybe delete max_memory alltogether
    VIR_FREE(item_str);
    item_str = NULL;

    if ((item_str = lxctoolsConffileGetItem(conffile,
                                            "lxc.cgroup.memory.soft_limit_in_bytes")) == NULL)
        goto error;
    if (item_str[0] != '\0') {
        def->mem.soft_limit = memToULL(item_str);
    }
    VIR_FREE(item_str);
    item_str = NULL;

    if ((item_str = lxctoolsConffileGetItem(conffile,
                                            "lxc.cgroup.cpuset.mems")) == NULL)
        goto error;
    if (item_str[0] != '\0') {
        virBitmapPtr nodeset;
        if (virBitmapParse(item_str, &nodeset, nodeinfo->nodes) < 0)
            goto error;
        if (virDomainNumatuneSet(def->numa, true,
                                 VIR_DOMAIN_NUMATUNE_PLACEMENT_DEFAULT,
                                 VIR_DOMAIN_NUMATUNE_MEM_STRICT,
                                 nodeset) < 0)
            goto error;
    }
    VIR_FREE(item_str);
    item_str = NULL;

    if ((item_str = lxctoolsConffileGetItem(conffile, "lxc.include")) == NULL)
        goto error;
    //lxc.include is optional!
    if (item_str[0] != '\0') {
        def->metadata = xmlNewNode(NULL, (const xmlChar*) "metadata");
        xmlNewTextChild(def->metadata, NULL,
                        (const xmlChar*)"lxctools:include",
                        (const xmlChar*)item_str);
    }
    VIR_FREE(item_str);
    item_str = NULL;

    if (lxctoolsReadFSConfig(conffile, def) < 0)
        goto error;
    if (lxctoolsReadNetConfig(cont, def) < 0)
        goto error;

    /* Fix: nodeinfo was previously leaked on the success path */
    VIR_FREE(nodeinfo);
    /* TODO(review): @conffile is never released on any path; free it
     * here and in the error path once its destructor is confirmed */
    return 0;

 error:
    VIR_FREE(item_str);
    VIR_FREE(nodeinfo);
    return -1;
}
/* Align def->disks to def->domain. Sort the list of def->disks,
 * filling in any missing disks or snapshot state defaults given by
 * the domain, with a fallback to a passed in default. Convert paths
 * to disk targets for uniformity. Issue an error and return -1 if
 * any def->disks[n]->name appears more than once or does not map to
 * dom->disks. If require_match, also ensure that there is no
 * conflicting requests for both internal and external snapshots. */
int
virDomainSnapshotAlignDisks(virDomainSnapshotDefPtr def,
                            int default_snapshot,
                            bool require_match)
{
    int ret = -1;
    virBitmapPtr map = NULL;  /* marks which domain disks were requested */
    int i;
    int ndisks;
    bool inuse;

    if (!def->dom) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("missing domain in snapshot"));
        goto cleanup;
    }

    if (def->ndisks > def->dom->ndisks) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("too many disk snapshot requests for domain"));
        goto cleanup;
    }

    /* Unlikely to have a guest without disks but technically possible. */
    if (!def->dom->ndisks) {
        ret = 0;
        goto cleanup;
    }

    if (!(map = virBitmapNew(def->dom->ndisks))) {
        virReportOOMError();
        goto cleanup;
    }

    /* Double check requested disks. */
    for (i = 0; i < def->ndisks; i++) {
        virDomainSnapshotDiskDefPtr disk = &def->disks[i];
        int idx = virDomainDiskIndexByName(def->dom, disk->name, false);
        int disk_snapshot;

        if (idx < 0) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                           _("no disk named '%s'"), disk->name);
            goto cleanup;
        }

        /* Reject the same domain disk being requested twice */
        if (virBitmapGetBit(map, idx, &inuse) < 0 || inuse) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                           _("disk '%s' specified twice"),
                           disk->name);
            goto cleanup;
        }
        ignore_value(virBitmapSetBit(map, idx));
        disk->index = idx;

        disk_snapshot = def->dom->disks[idx]->snapshot;
        if (!disk->snapshot) {
            /* Inherit the domain's mode when compatible, otherwise
             * fall back to the passed-in default */
            if (disk_snapshot &&
                (!require_match ||
                 disk_snapshot == VIR_DOMAIN_SNAPSHOT_LOCATION_NONE))
                disk->snapshot = disk_snapshot;
            else
                disk->snapshot = default_snapshot;
        } else if (require_match &&
                   disk->snapshot != default_snapshot &&
                   !(disk->snapshot == VIR_DOMAIN_SNAPSHOT_LOCATION_NONE &&
                     disk_snapshot == VIR_DOMAIN_SNAPSHOT_LOCATION_NONE)) {
            const char *tmp;

            tmp = virDomainSnapshotLocationTypeToString(default_snapshot);
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                           _("disk '%s' must use snapshot mode '%s'"),
                           disk->name, tmp);
            goto cleanup;
        }

        if (disk->file &&
            disk->snapshot != VIR_DOMAIN_SNAPSHOT_LOCATION_EXTERNAL) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                           _("file '%s' for disk '%s' requires "
                             "use of external snapshot mode"),
                           disk->file, disk->name);
            goto cleanup;
        }

        /* Normalize a user-supplied path to the disk target name */
        if (STRNEQ(disk->name, def->dom->disks[idx]->dst)) {
            VIR_FREE(disk->name);
            if (!(disk->name = strdup(def->dom->disks[idx]->dst))) {
                virReportOOMError();
                goto cleanup;
            }
        }
    }

    /* Provide defaults for all remaining disks. */
    ndisks = def->ndisks;
    if (VIR_EXPAND_N(def->disks, def->ndisks,
                     def->dom->ndisks - def->ndisks) < 0) {
        virReportOOMError();
        goto cleanup;
    }

    for (i = 0; i < def->dom->ndisks; i++) {
        virDomainSnapshotDiskDefPtr disk;

        ignore_value(virBitmapGetBit(map, i, &inuse));
        if (inuse)
            continue;
        disk = &def->disks[ndisks++];
        if (!(disk->name = strdup(def->dom->disks[i]->dst))) {
            virReportOOMError();
            goto cleanup;
        }
        disk->index = i;
        disk->snapshot = def->dom->disks[i]->snapshot;
        if (!disk->snapshot)
            disk->snapshot = default_snapshot;
    }

    /* After the expansion above def->ndisks == def->dom->ndisks, so
     * sorting lets def->disks[i] line up with def->dom->disks[i] in
     * the filename-generation loop below */
    qsort(&def->disks[0], def->ndisks, sizeof(def->disks[0]),
          disksorter);

    /* Generate any default external file names, but only if the
     * backing file is a regular file. */
    for (i = 0; i < def->ndisks; i++) {
        virDomainSnapshotDiskDefPtr disk = &def->disks[i];

        if (disk->snapshot == VIR_DOMAIN_SNAPSHOT_LOCATION_EXTERNAL &&
            !disk->file) {
            const char *original = def->dom->disks[i]->src;
            const char *tmp;
            struct stat sb;

            if (!original) {
                virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                               _("cannot generate external snapshot name "
                                 "for disk '%s' without source"),
                               disk->name);
                goto cleanup;
            }
            if (stat(original, &sb) < 0 || !S_ISREG(sb.st_mode)) {
                virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                               _("source for disk '%s' is not a regular "
                                 "file; refusing to generate external "
                                 "snapshot name"),
                               disk->name);
                goto cleanup;
            }

            /* Derive "<source-stem>.<snapshot-name>" */
            tmp = strrchr(original, '.');
            if (!tmp || strchr(tmp, '/')) {
                ignore_value(virAsprintf(&disk->file, "%s.%s",
                                         original, def->name));
            } else {
                if ((tmp - original) > INT_MAX) {
                    virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                                   _("integer overflow"));
                    goto cleanup;
                }
                ignore_value(virAsprintf(&disk->file, "%.*s.%s",
                                         (int) (tmp - original), original,
                                         def->name));
            }
            if (!disk->file) {
                virReportOOMError();
                goto cleanup;
            }
        }
    }

    ret = 0;

 cleanup:
    virBitmapFree(map);
    return ret;
}