/**
 * virNetDevMacVLanReserveID:
 *
 * @id: id in the range 0 - MACVLAN_MAX_ID to reserve (or -1 for "first free")
 * @flags: set VIR_NETDEV_MACVLAN_CREATE_WITH_TAP for macvtapN else macvlanN
 * @quietFail: don't log an error if this name is already in-use
 * @nextFree: reserve the next free ID *after* @id rather than @id itself
 *
 * Reserve the indicated ID in the appropriate bitmap, or find the
 * first free ID if @id is -1.
 *
 * Returns the newly reserved ID on success, or -1 to indicate failure.
 */
static int
virNetDevMacVLanReserveID(int id, unsigned int flags,
                          bool quietFail, bool nextFree)
{
    virBitmapPtr bitmap;

    if (virNetDevMacVLanInitialize() < 0)
        return -1;

    bitmap = (flags & VIR_NETDEV_MACVLAN_CREATE_WITH_TAP) ?
        macvtapIDs : macvlanIDs;

    if (id > MACVLAN_MAX_ID) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("can't use name %s%d - out of range 0-%d"),
                       (flags & VIR_NETDEV_MACVLAN_CREATE_WITH_TAP) ?
                       MACVTAP_NAME_PREFIX : MACVLAN_NAME_PREFIX,
                       id, MACVLAN_MAX_ID);
        return -1;
    }

    if ((id < 0 || nextFree) &&
        (id = virBitmapNextClearBit(bitmap, id)) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("no unused %s names available"),
                       (flags & VIR_NETDEV_MACVLAN_CREATE_WITH_TAP) ?
                       MACVTAP_NAME_PREFIX : MACVLAN_NAME_PREFIX);
        return -1;
    }

    if (virBitmapIsBitSet(bitmap, id)) {
        if (quietFail) {
            VIR_INFO("couldn't reserve name %s%d - already in use",
                     (flags & VIR_NETDEV_MACVLAN_CREATE_WITH_TAP) ?
                     MACVTAP_NAME_PREFIX : MACVLAN_NAME_PREFIX, id);
        } else {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("couldn't reserve name %s%d - already in use"),
                           (flags & VIR_NETDEV_MACVLAN_CREATE_WITH_TAP) ?
                           MACVTAP_NAME_PREFIX : MACVLAN_NAME_PREFIX, id);
        }
        return -1;
    }

    if (virBitmapSetBit(bitmap, id) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("couldn't mark %s%d as used"),
                       (flags & VIR_NETDEV_MACVLAN_CREATE_WITH_TAP) ?
                       MACVTAP_NAME_PREFIX : MACVLAN_NAME_PREFIX, id);
        return -1;
    }

    VIR_INFO("reserving device %s%d",
             (flags & VIR_NETDEV_MACVLAN_CREATE_WITH_TAP) ?
             MACVTAP_NAME_PREFIX : MACVLAN_NAME_PREFIX, id);
    return id;
}
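/* Usage sketch (hypothetical caller, not part of the original source):
 * reserve the first free macvtap ID and build the interface name from
 * it; on failure of the name allocation the reservation is unwound via
 * virNetDevMacVLanReleaseID() below. */
static char *
exampleReserveMacvtapName(void)
{
    char *ifname = NULL;
    int id = virNetDevMacVLanReserveID(-1, VIR_NETDEV_MACVLAN_CREATE_WITH_TAP,
                                       false, false);

    if (id < 0)
        return NULL;

    if (virAsprintf(&ifname, MACVTAP_NAME_PREFIX "%d", id) < 0) {
        virNetDevMacVLanReleaseID(id, VIR_NETDEV_MACVLAN_CREATE_WITH_TAP);
        return NULL;
    }
    return ifname;
}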
bool
virNumaNodeIsAvailable(int node)
{
    bool ret = false;
    virBitmapPtr map = NULL;

    if (virFileReadValueBitmap(&map, "%s/node/online", SYSFS_SYSTEM_PATH) < 0)
        return false;

    ret = virBitmapIsBitSet(map, node);
    virBitmapFree(map);
    return ret;
}
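/* Usage sketch (hypothetical, not part of the original source): scan
 * for the first online NUMA node, e.g. as a fallback placement target.
 * The upper bound of 1024 nodes is an arbitrary assumption made for
 * the example. */
static int
exampleFirstAvailableNode(void)
{
    int node;

    for (node = 0; node < 1024; node++) {
        if (virNumaNodeIsAvailable(node))
            return node;
    }
    return -1;
}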
int
virPortAllocatorAcquire(const virPortAllocatorRange *range,
                        unsigned short *port)
{
    int ret = -1;
    size_t i;
    virPortAllocatorPtr pa = virPortAllocatorGet();

    *port = 0;

    if (!pa)
        return -1;

    virObjectLock(pa);

    for (i = range->start; i <= range->end && !*port; i++) {
        bool used = false, v6used = false;

        if (virBitmapIsBitSet(pa->bitmap, i))
            continue;

        if (virPortAllocatorBindToPort(&v6used, i, AF_INET6) < 0 ||
            virPortAllocatorBindToPort(&used, i, AF_INET) < 0)
            goto cleanup;

        if (!used && !v6used) {
            /* Add port to bitmap of reserved ports */
            if (virBitmapSetBit(pa->bitmap, i) < 0) {
                virReportError(VIR_ERR_INTERNAL_ERROR,
                               _("Failed to reserve port %zu"), i);
                goto cleanup;
            }
            *port = i;
            ret = 0;
        }
    }

    if (*port == 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Unable to find an unused port in range '%s' (%d-%d)"),
                       range->name, range->start, range->end);
    }

 cleanup:
    virObjectUnlock(pa);
    return ret;
}
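/* Usage sketch (hypothetical, not part of the original source): acquire
 * a free port for a VNC-style display service. virPortAllocatorRangeNew()
 * and virPortAllocatorRangeFree() are assumed to exist with this shape;
 * the range name and bounds are illustrative only. */
static int
exampleAcquireDisplayPort(unsigned short *port)
{
    int ret = -1;
    virPortAllocatorRangePtr range = NULL;

    if (!(range = virPortAllocatorRangeNew("display", 5900, 5999)))
        return -1;

    if (virPortAllocatorAcquire(range, port) < 0)
        goto cleanup;

    ret = 0;
 cleanup:
    virPortAllocatorRangeFree(range);
    return ret;
}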
/* FreeBSD variant: set the process affinity via cpuset_setaffinity(2) */
int
virProcessSetAffinity(pid_t pid, virBitmapPtr map)
{
    size_t i;
    cpuset_t mask;

    CPU_ZERO(&mask);
    for (i = 0; i < virBitmapSize(map); i++) {
        if (virBitmapIsBitSet(map, i))
            CPU_SET(i, &mask);
    }

    if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, pid,
                           sizeof(mask), &mask) != 0) {
        virReportSystemError(errno,
                             _("cannot set CPU affinity on process %d"), pid);
        return -1;
    }

    return 0;
}
int
virPortAllocatorSetUsed(unsigned short port)
{
    int ret = -1;
    virPortAllocatorPtr pa = virPortAllocatorGet();

    if (!pa)
        return -1;

    virObjectLock(pa);

    if (virBitmapIsBitSet(pa->bitmap, port) ||
        virBitmapSetBit(pa->bitmap, port) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Failed to reserve port %d"), port);
        goto cleanup;
    }

    ret = 0;
 cleanup:
    virObjectUnlock(pa);
    return ret;
}
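/* Usage sketch (hypothetical, not part of the original source): after a
 * daemon restart, re-register a port recorded in a guest's status so
 * that later virPortAllocatorAcquire() calls skip it. Treating port 0
 * as "nothing allocated" is an assumption of this example. */
static int
exampleReclaimRecordedPort(unsigned short recordedPort)
{
    if (recordedPort == 0)
        return 0; /* nothing was allocated for this guest */

    return virPortAllocatorSetUsed(recordedPort);
}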
/**
 * virNetDevMacVLanReleaseID:
 * @id: id in the range 0 - MACVLAN_MAX_ID to release (a negative id is
 *      silently ignored)
 * @flags: set VIR_NETDEV_MACVLAN_CREATE_WITH_TAP for macvtapN else macvlanN
 *
 * Returns 0 for success or -1 for failure.
 */
static int
virNetDevMacVLanReleaseID(int id, unsigned int flags)
{
    virBitmapPtr bitmap;

    if (virNetDevMacVLanInitialize() < 0)
        return 0;

    bitmap = (flags & VIR_NETDEV_MACVLAN_CREATE_WITH_TAP) ?
        macvtapIDs : macvlanIDs;

    if (id > MACVLAN_MAX_ID) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("can't free name %s%d - out of range 0-%d"),
                       (flags & VIR_NETDEV_MACVLAN_CREATE_WITH_TAP) ?
                       MACVTAP_NAME_PREFIX : MACVLAN_NAME_PREFIX,
                       id, MACVLAN_MAX_ID);
        return -1;
    }

    if (id < 0)
        return 0;

    VIR_INFO("releasing %sdevice %s%d",
             virBitmapIsBitSet(bitmap, id) ? "" : "unreserved ",
             (flags & VIR_NETDEV_MACVLAN_CREATE_WITH_TAP) ?
             MACVTAP_NAME_PREFIX : MACVLAN_NAME_PREFIX, id);

    if (virBitmapClearBit(bitmap, id) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("couldn't mark %s%d as unused"),
                       (flags & VIR_NETDEV_MACVLAN_CREATE_WITH_TAP) ?
                       MACVTAP_NAME_PREFIX : MACVLAN_NAME_PREFIX, id);
        return -1;
    }
    return 0;
}
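/* Teardown sketch (hypothetical, not part of the original source):
 * recover the numeric ID from an interface name generated earlier and
 * release it; the prefix-matching logic here is illustrative only. */
static int
exampleReleaseMacvtapName(const char *ifname)
{
    unsigned int id;

    if (sscanf(ifname, MACVTAP_NAME_PREFIX "%u", &id) != 1)
        return 0; /* not a name we generated, nothing to release */

    return virNetDevMacVLanReleaseID(id, VIR_NETDEV_MACVLAN_CREATE_WITH_TAP);
}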
/* Linux variant: set the process affinity via sched_setaffinity(2) */
int
virProcessSetAffinity(pid_t pid, virBitmapPtr map)
{
    size_t i;

    VIR_DEBUG("Set process affinity on %lld", (long long)pid);

# ifdef CPU_ALLOC
    /* New method dynamically allocates the cpu mask, allowing an
     * unlimited number of cpus */
    int numcpus = 1024;
    size_t masklen;
    cpu_set_t *mask;

    /* Not only may the statically allocated cpu_set_t be too small,
     * but there is no way to ask the kernel what size is large enough.
     * So you have no option but to pick a size, try, catch EINVAL,
     * enlarge, and re-try.
     *
     * http://lkml.org/lkml/2009/7/28/620
     */
 realloc:
    masklen = CPU_ALLOC_SIZE(numcpus);
    mask = CPU_ALLOC(numcpus);

    if (!mask) {
        virReportOOMError();
        return -1;
    }

    CPU_ZERO_S(masklen, mask);
    for (i = 0; i < virBitmapSize(map); i++) {
        if (virBitmapIsBitSet(map, i))
            CPU_SET_S(i, masklen, mask);
    }

    if (sched_setaffinity(pid, masklen, mask) < 0) {
        CPU_FREE(mask);
        if (errno == EINVAL &&
            numcpus < (1024 << 8)) { /* 262144 cpus ought to be enough for anyone */
            numcpus = numcpus << 2;
            goto realloc;
        }
        virReportSystemError(errno,
                             _("cannot set CPU affinity on process %d"), pid);
        return -1;
    }
    CPU_FREE(mask);
# else
    /* Legacy method uses a fixed size cpu mask, only allows up to 1024 cpus */
    cpu_set_t mask;

    CPU_ZERO(&mask);
    for (i = 0; i < virBitmapSize(map); i++) {
        if (virBitmapIsBitSet(map, i))
            CPU_SET(i, &mask);
    }

    if (sched_setaffinity(pid, sizeof(mask), &mask) < 0) {
        virReportSystemError(errno,
                             _("cannot set CPU affinity on process %d"), pid);
        return -1;
    }
# endif

    return 0;
}
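/* Usage sketch (hypothetical, not part of the original source): pin a
 * process to CPUs 0-3; the same call works with either platform variant
 * of virProcessSetAffinity() above. */
static int
examplePinToFirstFourCpus(pid_t pid)
{
    int ret = -1;
    size_t i;
    virBitmapPtr map = NULL;

    if (!(map = virBitmapNew(4)))
        return -1;

    for (i = 0; i < 4; i++)
        ignore_value(virBitmapSetBit(map, i));

    ret = virProcessSetAffinity(pid, map);
    virBitmapFree(map);
    return ret;
}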
bool
dnsmasqCapsGet(dnsmasqCapsPtr caps, dnsmasqCapsFlags flag)
{
    return caps && virBitmapIsBitSet(caps->flags, flag);
}
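/* Usage sketch (hypothetical, not part of the original source): branch
 * on a capability bit when building the dnsmasq command line;
 * DNSMASQ_CAPS_BIND_DYNAMIC is assumed to be one of the
 * dnsmasqCapsFlags values. */
static const char *
exampleBindOption(dnsmasqCapsPtr caps)
{
    if (dnsmasqCapsGet(caps, DNSMASQ_CAPS_BIND_DYNAMIC))
        return "--bind-dynamic";
    return "--bind-interfaces";
}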
static int
virHostCPUParseNode(const char *node,
                    virArch arch,
                    virBitmapPtr present_cpus_map,
                    virBitmapPtr online_cpus_map,
                    int threads_per_subcore,
                    int *sockets,
                    int *cores,
                    int *threads,
                    int *offline)
{
    /* Biggest value we can expect to be used as either socket id
     * or core id. Bitmaps will need to be sized accordingly */
    const int ID_MAX = 4095;
    int ret = -1;
    int processors = 0;
    DIR *cpudir = NULL;
    struct dirent *cpudirent = NULL;
    virBitmapPtr node_cpus_map = NULL;
    virBitmapPtr sockets_map = NULL;
    virBitmapPtr *cores_maps = NULL;
    int npresent_cpus = virBitmapSize(present_cpus_map);
    int sock_max = 0;
    int sock;
    int core;
    size_t i;
    int siblings;
    unsigned int cpu;
    int direrr;

    *threads = 0;
    *cores = 0;
    *sockets = 0;

    if (!(cpudir = opendir(node))) {
        virReportSystemError(errno, _("cannot opendir %s"), node);
        goto cleanup;
    }

    /* Keep track of the CPUs that belong to the current node */
    if (!(node_cpus_map = virBitmapNew(npresent_cpus)))
        goto cleanup;

    /* enumerate sockets in the node */
    if (!(sockets_map = virBitmapNew(ID_MAX + 1)))
        goto cleanup;

    while ((direrr = virDirRead(cpudir, &cpudirent, node)) > 0) {
        if (sscanf(cpudirent->d_name, "cpu%u", &cpu) != 1)
            continue;

        if (!virBitmapIsBitSet(present_cpus_map, cpu))
            continue;

        /* Mark this CPU as part of the current node */
        if (virBitmapSetBit(node_cpus_map, cpu) < 0)
            goto cleanup;

        if (!virBitmapIsBitSet(online_cpus_map, cpu))
            continue;

        /* Parse socket */
        if ((sock = virHostCPUParseSocket(node, arch, cpu)) < 0)
            goto cleanup;
        if (sock > ID_MAX) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("Socket %d can't be handled (max socket is %d)"),
                           sock, ID_MAX);
            goto cleanup;
        }

        if (virBitmapSetBit(sockets_map, sock) < 0)
            goto cleanup;

        if (sock > sock_max)
            sock_max = sock;
    }

    if (direrr < 0)
        goto cleanup;

    sock_max++;

    /* allocate cores maps for each socket */
    if (VIR_ALLOC_N(cores_maps, sock_max) < 0)
        goto cleanup;

    for (i = 0; i < sock_max; i++)
        if (!(cores_maps[i] = virBitmapNew(ID_MAX + 1)))
            goto cleanup;

    /* Iterate over all CPUs in the node, in ascending order */
    for (cpu = 0; cpu < npresent_cpus; cpu++) {

        /* Skip CPUs that are not part of the current node */
        if (!virBitmapIsBitSet(node_cpus_map, cpu))
            continue;

        if (!virBitmapIsBitSet(online_cpus_map, cpu)) {
            if (threads_per_subcore > 0 &&
                cpu % threads_per_subcore != 0 &&
                virBitmapIsBitSet(online_cpus_map,
                                  cpu - (cpu % threads_per_subcore))) {
                /* Secondary offline threads are counted as online when
                 * subcores are in use and the corresponding primary
                 * thread is online */
                processors++;
            } else {
                /* But they are counted as offline otherwise */
                (*offline)++;
            }
            continue;
        }

        processors++;

        /* Parse socket */
        if ((sock = virHostCPUParseSocket(node, arch, cpu)) < 0)
            goto cleanup;
        if (!virBitmapIsBitSet(sockets_map, sock)) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("CPU socket topology has changed"));
            goto cleanup;
        }

        /* Parse core */
        if (ARCH_IS_S390(arch)) {
            /* logical cpu is equivalent to a core on s390 */
            core = cpu;
        } else {
            if ((core = virHostCPUGetValue(node, cpu,
                                           "topology/core_id", 0)) < 0)
                goto cleanup;
        }
        if (core > ID_MAX) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("Core %d can't be handled (max core is %d)"),
                           core, ID_MAX);
            goto cleanup;
        }

        if (virBitmapSetBit(cores_maps[sock], core) < 0)
            goto cleanup;

        if (!(siblings = virHostCPUCountThreadSiblings(node, cpu)))
            goto cleanup;

        if (siblings > *threads)
            *threads = siblings;
    }

    /* finalize the returned data */
    *sockets = virBitmapCountBits(sockets_map);

    for (i = 0; i < sock_max; i++) {
        if (!virBitmapIsBitSet(sockets_map, i))
            continue;

        core = virBitmapCountBits(cores_maps[i]);
        if (core > *cores)
            *cores = core;
    }

    if (threads_per_subcore > 0) {
        /* The thread count ignores offline threads, which means that
         * only primary threads have been considered so far. If subcores
         * are in use, we need to also account for secondary threads */
        *threads *= threads_per_subcore;
    }
    ret = processors;

 cleanup:
    /* don't shadow a more serious error */
    if (cpudir && closedir(cpudir) < 0 && ret >= 0) {
        virReportSystemError(errno, _("problem closing %s"), node);
        ret = -1;
    }
    if (cores_maps)
        for (i = 0; i < sock_max; i++)
            virBitmapFree(cores_maps[i]);
    VIR_FREE(cores_maps);
    virBitmapFree(sockets_map);
    virBitmapFree(node_cpus_map);

    return ret;
}
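/* Worked example of the subcore accounting above (added for clarity,
 * not part of the original source): with threads_per_subcore == 8,
 * cpu 9 has 9 % 8 == 1, so it is a secondary thread; it is counted as
 * online whenever its primary thread (cpu 9 - 1 == 8) is online, even
 * though sysfs reports the secondary thread itself as offline. */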
/* Align def->disks to def->domain. Sort the list of def->disks,
 * filling in any missing disks or snapshot state defaults given by
 * the domain, with a fallback to a passed in default. Convert paths
 * to disk targets for uniformity. Issue an error and return -1 if
 * any def->disks[n]->name appears more than once or does not map to
 * dom->disks. If require_match, also ensure that there are no
 * conflicting requests for both internal and external snapshots. */
int
virDomainSnapshotAlignDisks(virDomainSnapshotDefPtr def,
                            int default_snapshot,
                            bool require_match)
{
    int ret = -1;
    virBitmapPtr map = NULL;
    size_t i;
    int ndisks;

    if (!def->dom) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("missing domain in snapshot"));
        goto cleanup;
    }

    if (def->ndisks > def->dom->ndisks) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("too many disk snapshot requests for domain"));
        goto cleanup;
    }

    /* Unlikely to have a guest without disks but technically possible. */
    if (!def->dom->ndisks) {
        ret = 0;
        goto cleanup;
    }

    if (!(map = virBitmapNew(def->dom->ndisks)))
        goto cleanup;

    /* Double check requested disks. */
    for (i = 0; i < def->ndisks; i++) {
        virDomainSnapshotDiskDefPtr disk = &def->disks[i];
        int idx = virDomainDiskIndexByName(def->dom, disk->name, false);
        int disk_snapshot;

        if (idx < 0) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                           _("no disk named '%s'"), disk->name);
            goto cleanup;
        }

        if (virBitmapIsBitSet(map, idx)) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                           _("disk '%s' specified twice"),
                           disk->name);
            goto cleanup;
        }
        ignore_value(virBitmapSetBit(map, idx));
        disk->index = idx;

        disk_snapshot = def->dom->disks[idx]->snapshot;
        if (!disk->snapshot) {
            if (disk_snapshot &&
                (!require_match ||
                 disk_snapshot == VIR_DOMAIN_SNAPSHOT_LOCATION_NONE))
                disk->snapshot = disk_snapshot;
            else
                disk->snapshot = default_snapshot;
        } else if (require_match &&
                   disk->snapshot != default_snapshot &&
                   !(disk->snapshot == VIR_DOMAIN_SNAPSHOT_LOCATION_NONE &&
                     disk_snapshot == VIR_DOMAIN_SNAPSHOT_LOCATION_NONE)) {
            const char *tmp;

            tmp = virDomainSnapshotLocationTypeToString(default_snapshot);
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                           _("disk '%s' must use snapshot mode '%s'"),
                           disk->name, tmp);
            goto cleanup;
        }
        if (disk->src->path &&
            disk->snapshot != VIR_DOMAIN_SNAPSHOT_LOCATION_EXTERNAL) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                           _("file '%s' for disk '%s' requires "
                             "use of external snapshot mode"),
                           disk->src->path, disk->name);
            goto cleanup;
        }
        if (STRNEQ(disk->name, def->dom->disks[idx]->dst)) {
            VIR_FREE(disk->name);
            if (VIR_STRDUP(disk->name, def->dom->disks[idx]->dst) < 0)
                goto cleanup;
        }
    }

    /* Provide defaults for all remaining disks. */
    ndisks = def->ndisks;
    if (VIR_EXPAND_N(def->disks, def->ndisks,
                     def->dom->ndisks - def->ndisks) < 0)
        goto cleanup;

    for (i = 0; i < def->dom->ndisks; i++) {
        virDomainSnapshotDiskDefPtr disk;

        if (virBitmapIsBitSet(map, i))
            continue;
        disk = &def->disks[ndisks++];
        if (VIR_ALLOC(disk->src) < 0)
            goto cleanup;
        if (VIR_STRDUP(disk->name, def->dom->disks[i]->dst) < 0)
            goto cleanup;
        disk->index = i;

        /* Don't snapshot empty drives */
        if (virStorageSourceIsEmpty(def->dom->disks[i]->src))
            disk->snapshot = VIR_DOMAIN_SNAPSHOT_LOCATION_NONE;
        else
            disk->snapshot = def->dom->disks[i]->snapshot;

        disk->src->type = VIR_STORAGE_TYPE_FILE;
        if (!disk->snapshot)
            disk->snapshot = default_snapshot;
    }

    qsort(&def->disks[0], def->ndisks, sizeof(def->disks[0]), disksorter);

    /* Generate any default external file names, but only if the
     * backing file is a regular file. */
    for (i = 0; i < def->ndisks; i++) {
        virDomainSnapshotDiskDefPtr disk = &def->disks[i];

        if (disk->snapshot == VIR_DOMAIN_SNAPSHOT_LOCATION_EXTERNAL &&
            !disk->src->path) {
            const char *original = virDomainDiskGetSource(def->dom->disks[i]);
            const char *tmp;
            struct stat sb;

            if (disk->src->type != VIR_STORAGE_TYPE_FILE) {
                virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                               _("cannot generate external snapshot name "
                                 "for disk '%s' on a '%s' device"),
                               disk->name,
                               virStorageTypeToString(disk->src->type));
                goto cleanup;
            }

            if (!original) {
                virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                               _("cannot generate external snapshot name "
                                 "for disk '%s' without source"),
                               disk->name);
                goto cleanup;
            }
            if (stat(original, &sb) < 0 || !S_ISREG(sb.st_mode)) {
                virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                               _("source for disk '%s' is not a regular "
                                 "file; refusing to generate external "
                                 "snapshot name"),
                               disk->name);
                goto cleanup;
            }

            tmp = strrchr(original, '.');
            if (!tmp || strchr(tmp, '/')) {
                if (virAsprintf(&disk->src->path, "%s.%s", original,
                                def->name) < 0)
                    goto cleanup;
            } else {
                if ((tmp - original) > INT_MAX) {
                    virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                                   _("integer overflow"));
                    goto cleanup;
                }
                if (virAsprintf(&disk->src->path, "%.*s.%s",
                                (int) (tmp - original), original,
                                def->name) < 0)
                    goto cleanup;
            }
        }
    }

    ret = 0;

 cleanup:
    virBitmapFree(map);
    return ret;
}
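/* Worked example of the name generation above (added for clarity, not
 * part of the original source): for a snapshot named "snap1", a source
 * "/images/guest.qcow2" becomes "/images/guest.snap1" (the final
 * extension is replaced), a source "/images/guest" with no extension
 * becomes "/images/guest.snap1" by plain suffixing, and a source
 * "/images.dir/guest" also gets plain suffixing because the only '.'
 * sits before the last '/'. */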
int virHostValidateQEMU(void)
{
    virBitmapPtr flags;
    int ret = 0;
    bool hasHwVirt = false;

    virHostMsgCheck("QEMU", "%s", _("for hardware virtualization"));

    if (!(flags = virHostValidateGetCPUFlags()))
        return -1;

    switch (virArchFromHost()) {
    case VIR_ARCH_I686:
    case VIR_ARCH_X86_64:
        if (virBitmapIsBitSet(flags, VIR_HOST_VALIDATE_CPU_FLAG_SVM) ||
            virBitmapIsBitSet(flags, VIR_HOST_VALIDATE_CPU_FLAG_VMX))
            hasHwVirt = true;
        break;
    case VIR_ARCH_S390:
    case VIR_ARCH_S390X:
        if (virBitmapIsBitSet(flags, VIR_HOST_VALIDATE_CPU_FLAG_SIE))
            hasHwVirt = true;
        break;
    default:
        hasHwVirt = false;
    }

    if (hasHwVirt) {
        virHostMsgPass();
        if (virHostValidateDeviceExists("QEMU", "/dev/kvm",
                                        VIR_HOST_VALIDATE_FAIL,
                                        _("Check that the 'kvm-intel' or 'kvm-amd' modules are "
                                          "loaded & the BIOS has enabled virtualization")) < 0)
            ret = -1;
        else if (virHostValidateDeviceAccessible("QEMU", "/dev/kvm",
                                                 VIR_HOST_VALIDATE_FAIL,
                                                 _("Check /dev/kvm is world writable or you are in "
                                                   "a group that is allowed to access it")) < 0)
            ret = -1;
    } else {
        virHostMsgFail(VIR_HOST_VALIDATE_WARN,
                       _("Only emulated CPUs are available, performance will be significantly limited"));
    }

    virBitmapFree(flags);

    if (virHostValidateDeviceExists("QEMU", "/dev/vhost-net",
                                    VIR_HOST_VALIDATE_WARN,
                                    _("Load the 'vhost_net' module to improve performance "
                                      "of virtio networking")) < 0)
        ret = -1;

    if (virHostValidateDeviceExists("QEMU", "/dev/net/tun",
                                    VIR_HOST_VALIDATE_FAIL,
                                    _("Load the 'tun' module to enable networking for QEMU guests")) < 0)
        ret = -1;

    if (virHostValidateCGroupController("QEMU", "memory",
                                        VIR_HOST_VALIDATE_WARN,
                                        "MEMCG") < 0)
        ret = -1;

    if (virHostValidateCGroupController("QEMU", "cpu",
                                        VIR_HOST_VALIDATE_WARN,
                                        "CGROUP_CPU") < 0)
        ret = -1;

    if (virHostValidateCGroupController("QEMU", "cpuacct",
                                        VIR_HOST_VALIDATE_WARN,
                                        "CGROUP_CPUACCT") < 0)
        ret = -1;

    if (virHostValidateCGroupController("QEMU", "cpuset",
                                        VIR_HOST_VALIDATE_WARN,
                                        "CPUSETS") < 0)
        ret = -1;

    if (virHostValidateCGroupController("QEMU", "devices",
                                        VIR_HOST_VALIDATE_WARN,
                                        "CGROUP_DEVICES") < 0)
        ret = -1;

    if (virHostValidateCGroupController("QEMU", "blkio",
                                        VIR_HOST_VALIDATE_WARN,
                                        "BLK_CGROUP") < 0)
        ret = -1;

    if (virHostValidateIOMMU("QEMU",
                             VIR_HOST_VALIDATE_WARN) < 0)
        ret = -1;

    return ret;
}