int
qemuSetupGlobalCpuCgroup(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    unsigned long long period = vm->def->cputune.global_period;
    long long quota = vm->def->cputune.global_quota;
    char *mem_mask = NULL;
    virDomainNumatuneMemMode mem_mode;

    if ((period || quota) &&
        !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU)) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("cgroup cpu is required for scheduler tuning"));
        return -1;
    }

    /*
     * If CPU cgroup controller is not initialized here, then we need
     * neither period nor quota settings.  And if CPUSET controller is
     * not initialized either, then there's nothing to do anyway.
     */
    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU) &&
        !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET))
        return 0;

    if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) == 0 &&
        mem_mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
        virDomainNumatuneMaybeFormatNodeset(vm->def->numa,
                                            priv->autoNodeset,
                                            &mem_mask, -1) < 0)
        goto cleanup;

    if (period || quota) {
        if (qemuSetupCgroupVcpuBW(priv->cgroup, period, quota) < 0)
            goto cleanup;
    }

    VIR_FREE(mem_mask);

    return 0;

 cleanup:
    VIR_FREE(mem_mask);
    return -1;
}
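/*
 * Illustrative sketch only: the function above (and the per-thread setup
 * functions below) delegate the actual period/quota writes to
 * qemuSetupCgroupVcpuBW(), which is not part of this excerpt.  A minimal
 * helper along these lines would map the cputune values onto the cgroup CFS
 * knobs via virCgroupSetCpuCfsPeriod() and virCgroupSetCpuCfsQuota().  The
 * name exampleSetupVcpuBW and the rollback-on-failure behaviour are
 * assumptions for illustration, not a copy of the real implementation.
 */
static int
exampleSetupVcpuBW(virCgroupPtr cgroup,
                   unsigned long long period,
                   long long quota)
{
    unsigned long long old_period;

    if (period == 0 && quota == 0)
        return 0;

    if (period) {
        /* remember the old period so we can roll back if setting quota fails */
        if (virCgroupGetCpuCfsPeriod(cgroup, &old_period) < 0)
            return -1;

        if (virCgroupSetCpuCfsPeriod(cgroup, period) < 0)
            return -1;
    }

    if (quota &&
        virCgroupSetCpuCfsQuota(cgroup, quota) < 0) {
        /* restore the previous period on failure */
        if (period)
            ignore_value(virCgroupSetCpuCfsPeriod(cgroup, old_period));
        return -1;
    }

    return 0;
}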
int
qemuSetupCgroupForVcpu(virDomainObjPtr vm)
{
    virCgroupPtr cgroup_vcpu = NULL;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virDomainDefPtr def = vm->def;
    size_t i, j;
    unsigned long long period = vm->def->cputune.period;
    long long quota = vm->def->cputune.quota;
    char *mem_mask = NULL;
    virDomainNumatuneMemMode mem_mode;

    if ((period || quota) &&
        !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU)) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("cgroup cpu is required for scheduler tuning"));
        return -1;
    }

    /*
     * If CPU cgroup controller is not initialized here, then we need
     * neither period nor quota settings.  And if CPUSET controller is
     * not initialized either, then there's nothing to do anyway.
     */
    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU) &&
        !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET))
        return 0;

    /* We are trying to set up cgroups for CPU pinning, which can also be done
     * with virProcessSetAffinity, thus the lack of cgroups is not fatal here.
     */
    if (priv->cgroup == NULL)
        return 0;

    if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
        /* If we don't know the VCPU<->PID mapping, or all vcpus run in the
         * same thread, we cannot control each vcpu. */
        return 0;
    }

    if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) == 0 &&
        mem_mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
        virDomainNumatuneMaybeFormatNodeset(vm->def->numa,
                                            priv->autoNodeset,
                                            &mem_mask, -1) < 0)
        goto cleanup;

    for (i = 0; i < priv->nvcpupids; i++) {
        virCgroupFree(&cgroup_vcpu);
        if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_VCPU, i,
                               true, &cgroup_vcpu) < 0)
            goto cleanup;

        /* move the thread for vcpu to sub dir */
        if (virCgroupAddTask(cgroup_vcpu, priv->vcpupids[i]) < 0)
            goto cleanup;

        if (period || quota) {
            if (qemuSetupCgroupVcpuBW(cgroup_vcpu, period, quota) < 0)
                goto cleanup;
        }

        /* Set vcpupin in cgroup if vcpupin xml is provided */
        if (virCgroupHasController(priv->cgroup,
                                   VIR_CGROUP_CONTROLLER_CPUSET)) {
            virBitmapPtr cpumap = NULL;

            if (mem_mask &&
                virCgroupSetCpusetMems(cgroup_vcpu, mem_mask) < 0)
                goto cleanup;

            /* try to use the default cpu maps */
            if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO)
                cpumap = priv->autoCpuset;
            else
                cpumap = vm->def->cpumask;

            /* lookup a more specific pinning info */
            for (j = 0; j < def->cputune.nvcpupin; j++) {
                if (def->cputune.vcpupin[j]->id == i) {
                    cpumap = def->cputune.vcpupin[j]->cpumask;
                    break;
                }
            }

            if (!cpumap)
                continue;

            if (qemuSetupCgroupCpusetCpus(cgroup_vcpu, cpumap) < 0)
                goto cleanup;
        }
    }
    virCgroupFree(&cgroup_vcpu);
    VIR_FREE(mem_mask);

    return 0;

 cleanup:
    if (cgroup_vcpu) {
        virCgroupRemove(cgroup_vcpu);
        virCgroupFree(&cgroup_vcpu);
    }
    VIR_FREE(mem_mask);

    return -1;
}
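/*
 * Illustrative sketch only: qemuSetupCgroupCpusetCpus() is called by the
 * vcpu, iothread and emulator setup paths in this excerpt but its body is
 * not included.  A minimal version would format the CPU bitmap into the
 * string syntax expected by the cpuset controller and pass it to
 * virCgroupSetCpusetCpus().  The helper name exampleSetupCpusetCpus is
 * hypothetical.
 */
static int
exampleSetupCpusetCpus(virCgroupPtr cgroup,
                       virBitmapPtr cpumask)
{
    int ret = -1;
    char *new_cpus = NULL;

    /* e.g. a bitmap with bits 0-3 and 8 set becomes "0-3,8" */
    if (!(new_cpus = virBitmapFormat(cpumask)))
        goto cleanup;

    if (virCgroupSetCpusetCpus(cgroup, new_cpus) < 0)
        goto cleanup;

    ret = 0;
 cleanup:
    VIR_FREE(new_cpus);
    return ret;
}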
int
qemuSetupCgroupForIOThreads(virDomainObjPtr vm)
{
    virCgroupPtr cgroup_iothread = NULL;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virDomainDefPtr def = vm->def;
    size_t i;
    unsigned long long period = vm->def->cputune.period;
    long long quota = vm->def->cputune.quota;
    char *mem_mask = NULL;
    virDomainNumatuneMemMode mem_mode;

    if ((period || quota) &&
        !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU)) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("cgroup cpu is required for scheduler tuning"));
        return -1;
    }

    /*
     * If CPU cgroup controller is not initialized here, then we need
     * neither period nor quota settings.  And if CPUSET controller is
     * not initialized either, then there's nothing to do anyway.
     */
    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU) &&
        !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET))
        return 0;

    /* We are trying to setup cgroups for CPU pinning, which can also be done
     * with virProcessSetAffinity, thus the lack of cgroups is not fatal here.
     */
    if (priv->cgroup == NULL)
        return 0;

    if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) == 0 &&
        mem_mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
        virDomainNumatuneMaybeFormatNodeset(vm->def->numa,
                                            priv->autoNodeset,
                                            &mem_mask, -1) < 0)
        goto cleanup;

    for (i = 0; i < def->niothreadids; i++) {
        /* IOThreads are numbered 1..n, although the array is 0..n-1,
         * so we will account for that here
         */
        if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_IOTHREAD,
                               def->iothreadids[i]->iothread_id,
                               true, &cgroup_iothread) < 0)
            goto cleanup;

        /* move the thread for iothread to sub dir */
        if (virCgroupAddTask(cgroup_iothread,
                             def->iothreadids[i]->thread_id) < 0)
            goto cleanup;

        if (period || quota) {
            if (qemuSetupCgroupVcpuBW(cgroup_iothread, period, quota) < 0)
                goto cleanup;
        }

        /* Set iothreadpin in cgroup if iothreadpin xml is provided */
        if (virCgroupHasController(priv->cgroup,
                                   VIR_CGROUP_CONTROLLER_CPUSET)) {
            virBitmapPtr cpumask = NULL;

            if (mem_mask &&
                virCgroupSetCpusetMems(cgroup_iothread, mem_mask) < 0)
                goto cleanup;

            if (def->iothreadids[i]->cpumask)
                cpumask = def->iothreadids[i]->cpumask;
            else if (def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO)
                cpumask = priv->autoCpuset;
            else
                cpumask = def->cpumask;

            if (cpumask &&
                qemuSetupCgroupCpusetCpus(cgroup_iothread, cpumask) < 0)
                goto cleanup;
        }

        virCgroupFree(&cgroup_iothread);
    }
    VIR_FREE(mem_mask);

    return 0;

 cleanup:
    if (cgroup_iothread) {
        virCgroupRemove(cgroup_iothread);
        virCgroupFree(&cgroup_iothread);
    }
    VIR_FREE(mem_mask);

    return -1;
}
int
qemuSetupCgroupForEmulator(virDomainObjPtr vm)
{
    virBitmapPtr cpumask = NULL;
    virCgroupPtr cgroup_emulator = NULL;
    virDomainDefPtr def = vm->def;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    unsigned long long period = vm->def->cputune.emulator_period;
    long long quota = vm->def->cputune.emulator_quota;

    if ((period || quota) &&
        !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU)) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("cgroup cpu is required for scheduler tuning"));
        return -1;
    }

    /*
     * If CPU cgroup controller is not initialized here, then we need
     * neither period nor quota settings.  And if CPUSET controller is
     * not initialized either, then there's nothing to do anyway.
     */
    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU) &&
        !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET))
        return 0;

    if (priv->cgroup == NULL)
        return 0; /* Not supported, so claim success */

    if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_EMULATOR, 0,
                           true, &cgroup_emulator) < 0)
        goto cleanup;

    if (virCgroupMoveTask(priv->cgroup, cgroup_emulator) < 0)
        goto cleanup;

    if (def->cputune.emulatorpin)
        cpumask = def->cputune.emulatorpin;
    else if (def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO)
        cpumask = priv->autoCpuset;
    else if (def->cpumask)
        cpumask = def->cpumask;

    if (cpumask) {
        if (virCgroupHasController(priv->cgroup,
                                   VIR_CGROUP_CONTROLLER_CPUSET) &&
            qemuSetupCgroupCpusetCpus(cgroup_emulator, cpumask) < 0)
            goto cleanup;
    }

    if (period || quota) {
        if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU) &&
            qemuSetupCgroupVcpuBW(cgroup_emulator, period, quota) < 0)
            goto cleanup;
    }

    virCgroupFree(&cgroup_emulator);
    return 0;

 cleanup:
    if (cgroup_emulator) {
        virCgroupRemove(cgroup_emulator);
        virCgroupFree(&cgroup_emulator);
    }

    return -1;
}
int
qemuSetupCgroupForEmulator(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           virBitmapPtr nodemask)
{
    virBitmapPtr cpumask = NULL;
    virBitmapPtr cpumap = NULL;
    virCgroupPtr cgroup_emulator = NULL;
    virDomainDefPtr def = vm->def;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    unsigned long long period = vm->def->cputune.emulator_period;
    long long quota = vm->def->cputune.emulator_quota;

    if ((period || quota) &&
        !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU)) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("cgroup cpu is required for scheduler tuning"));
        return -1;
    }

    if (priv->cgroup == NULL)
        return 0; /* Not supported, so claim success */

    if (virCgroupNewEmulator(priv->cgroup, true, &cgroup_emulator) < 0)
        goto cleanup;

    if (virCgroupMoveTask(priv->cgroup, cgroup_emulator) < 0)
        goto cleanup;

    if (def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO) {
        if (!(cpumap = qemuPrepareCpumap(driver, nodemask)))
            goto cleanup;
        cpumask = cpumap;
    } else if (def->cputune.emulatorpin) {
        cpumask = def->cputune.emulatorpin->cpumask;
    } else if (def->cpumask) {
        cpumask = def->cpumask;
    }

    if (cpumask) {
        if (virCgroupHasController(priv->cgroup,
                                   VIR_CGROUP_CONTROLLER_CPUSET) &&
            qemuSetupCgroupEmulatorPin(cgroup_emulator, cpumask) < 0)
            goto cleanup;
    }

    if (period || quota) {
        if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU) &&
            qemuSetupCgroupVcpuBW(cgroup_emulator, period, quota) < 0)
            goto cleanup;
    }

    virCgroupFree(&cgroup_emulator);
    virBitmapFree(cpumap);

    return 0;

 cleanup:
    virBitmapFree(cpumap);

    if (cgroup_emulator) {
        virCgroupRemove(cgroup_emulator);
        virCgroupFree(&cgroup_emulator);
    }

    return -1;
}
int
qemuSetupCgroupForVcpu(virDomainObjPtr vm)
{
    virCgroupPtr cgroup_vcpu = NULL;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virDomainDefPtr def = vm->def;
    size_t i, j;
    unsigned long long period = vm->def->cputune.period;
    long long quota = vm->def->cputune.quota;

    if ((period || quota) &&
        !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU)) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("cgroup cpu is required for scheduler tuning"));
        return -1;
    }

    /* We are trying to set up cgroups for CPU pinning, which can also be done
     * with virProcessInfoSetAffinity, thus the lack of cgroups is not fatal
     * here.
     */
    if (priv->cgroup == NULL)
        return 0;

    if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
        /* If we don't know the VCPU<->PID mapping, or all vcpus run in the
         * same thread, we cannot control each vcpu. */
        VIR_WARN("Unable to get vcpus' pids.");
        return 0;
    }

    for (i = 0; i < priv->nvcpupids; i++) {
        if (virCgroupNewVcpu(priv->cgroup, i, true, &cgroup_vcpu) < 0)
            goto cleanup;

        /* move the thread for vcpu to sub dir */
        if (virCgroupAddTask(cgroup_vcpu, priv->vcpupids[i]) < 0)
            goto cleanup;

        if (period || quota) {
            if (qemuSetupCgroupVcpuBW(cgroup_vcpu, period, quota) < 0)
                goto cleanup;
        }

        /* Set vcpupin in cgroup if vcpupin xml is provided */
        if (virCgroupHasController(priv->cgroup,
                                   VIR_CGROUP_CONTROLLER_CPUSET)) {
            /* find the right CPU to pin, otherwise
             * qemuSetupCgroupVcpuPin will fail. */
            for (j = 0; j < def->cputune.nvcpupin; j++) {
                if (def->cputune.vcpupin[j]->vcpuid != i)
                    continue;

                if (qemuSetupCgroupVcpuPin(cgroup_vcpu,
                                           def->cputune.vcpupin,
                                           def->cputune.nvcpupin,
                                           i) < 0)
                    goto cleanup;

                break;
            }
        }

        virCgroupFree(&cgroup_vcpu);
    }

    return 0;

 cleanup:
    if (cgroup_vcpu) {
        virCgroupRemove(cgroup_vcpu);
        virCgroupFree(&cgroup_vcpu);
    }

    return -1;
}