static int process(
                const char *controller,
                const char *path,
                Hashmap *a,
                Hashmap *b,
                unsigned iteration,
                Group **ret) {

        Group *g;
        int r;

        assert(controller);
        assert(path);
        assert(a);

        /* Find the group, either in the current map or by moving it
         * over from the previous iteration's map */
        g = hashmap_get(a, path);
        if (!g) {
                g = hashmap_get(b, path);
                if (!g) {
                        g = new0(Group, 1);
                        if (!g)
                                return -ENOMEM;

                        g->path = strdup(path);
                        if (!g->path) {
                                group_free(g);
                                return -ENOMEM;
                        }

                        r = hashmap_put(a, g->path, g);
                        if (r < 0) {
                                group_free(g);
                                return r;
                        }
                } else {
                        r = hashmap_move_one(a, b, path);
                        if (r < 0)
                                return r;

                        g->cpu_valid = g->memory_valid = g->io_valid = g->n_tasks_valid = false;
                }
        }

        if (streq(controller, SYSTEMD_CGROUP_CONTROLLER) &&
            IN_SET(arg_count, COUNT_ALL_PROCESSES, COUNT_USERSPACE_PROCESSES)) {
                _cleanup_fclose_ FILE *f = NULL;
                pid_t pid;

                /* Count processes by enumerating the cgroup's process list */
                r = cg_enumerate_processes(controller, path, &f);
                if (r == -ENOENT)
                        return 0;
                if (r < 0)
                        return r;

                g->n_tasks = 0;
                while (cg_read_pid(f, &pid) > 0) {

                        if (arg_count == COUNT_USERSPACE_PROCESSES && is_kernel_thread(pid) > 0)
                                continue;

                        g->n_tasks++;
                }

                if (g->n_tasks > 0)
                        g->n_tasks_valid = true;

        } else if (streq(controller, "pids") && arg_count == COUNT_PIDS) {
                _cleanup_free_ char *p = NULL, *v = NULL;

                /* Read the task count directly from pids.current */
                r = cg_get_path(controller, path, "pids.current", &p);
                if (r < 0)
                        return r;

                r = read_one_line_file(p, &v);
                if (r == -ENOENT)
                        return 0;
                if (r < 0)
                        return r;

                r = safe_atou64(v, &g->n_tasks);
                if (r < 0)
                        return r;

                if (g->n_tasks > 0)
                        g->n_tasks_valid = true;

        } else if (streq(controller, "cpu") || streq(controller, "cpuacct")) {
                _cleanup_free_ char *p = NULL, *v = NULL;
                uint64_t new_usage;
                nsec_t timestamp;

                /* Sample cumulative CPU time and derive a usage fraction
                 * relative to the previous iteration */
                if (cg_all_unified() > 0) {
                        const char *keys[] = { "usage_usec", NULL };
                        _cleanup_free_ char *val = NULL;

                        if (!streq(controller, "cpu"))
                                return 0;

                        r = cg_get_keyed_attribute("cpu", path, "cpu.stat", keys, &val);
                        if (r == -ENOENT)
                                return 0;
                        if (r < 0)
                                return r;

                        r = safe_atou64(val, &new_usage);
                        if (r < 0)
                                return r;

                        new_usage *= NSEC_PER_USEC;
                } else {
                        if (!streq(controller, "cpuacct"))
                                return 0;

                        r = cg_get_path(controller, path, "cpuacct.usage", &p);
                        if (r < 0)
                                return r;

                        r = read_one_line_file(p, &v);
                        if (r == -ENOENT)
                                return 0;
                        if (r < 0)
                                return r;

                        r = safe_atou64(v, &new_usage);
                        if (r < 0)
                                return r;
                }

                timestamp = now_nsec(CLOCK_MONOTONIC);

                if (g->cpu_iteration == iteration - 1 &&
                    (nsec_t) new_usage > g->cpu_usage) {

                        nsec_t x, y;

                        x = timestamp - g->cpu_timestamp;
                        if (x < 1)
                                x = 1;

                        y = (nsec_t) new_usage - g->cpu_usage;
                        g->cpu_fraction = (double) y / (double) x;
                        g->cpu_valid = true;
                }

                g->cpu_usage = (nsec_t) new_usage;
                g->cpu_timestamp = timestamp;
                g->cpu_iteration = iteration;

        } else if (streq(controller, "memory")) {
                _cleanup_free_ char *p = NULL, *v = NULL;

                /* Read current memory usage, from the unified or legacy attribute */
                if (cg_all_unified() <= 0)
                        r = cg_get_path(controller, path, "memory.usage_in_bytes", &p);
                else
                        r = cg_get_path(controller, path, "memory.current", &p);
                if (r < 0)
                        return r;

                r = read_one_line_file(p, &v);
                if (r == -ENOENT)
                        return 0;
                if (r < 0)
                        return r;

                r = safe_atou64(v, &g->memory);
                if (r < 0)
                        return r;

                if (g->memory > 0)
                        g->memory_valid = true;

        } else if ((streq(controller, "io") && cg_all_unified() > 0) ||
                   (streq(controller, "blkio") && cg_all_unified() <= 0)) {
                _cleanup_fclose_ FILE *f = NULL;
                _cleanup_free_ char *p = NULL;
                bool unified = cg_all_unified() > 0;
                uint64_t wr = 0, rd = 0;
                nsec_t timestamp;

                /* Accumulate read/write byte counters and derive bandwidth
                 * relative to the previous iteration */
                r = cg_get_path(controller, path, unified ? "io.stat" : "blkio.io_service_bytes", &p);
                if (r < 0)
                        return r;

                f = fopen(p, "re");
                if (!f) {
                        if (errno == ENOENT)
                                return 0;
                        return -errno;
                }

                for (;;) {
                        char line[LINE_MAX], *l;
                        uint64_t k, *q;

                        if (!fgets(line, sizeof(line), f))
                                break;

                        /* Trim and skip the device */
                        l = strstrip(line);
                        l += strcspn(l, WHITESPACE);
                        l += strspn(l, WHITESPACE);

                        if (unified) {
                                while (!isempty(l)) {
                                        if (sscanf(l, "rbytes=%" SCNu64, &k))
                                                rd += k;
                                        else if (sscanf(l, "wbytes=%" SCNu64, &k))
                                                wr += k;

                                        l += strcspn(l, WHITESPACE);
                                        l += strspn(l, WHITESPACE);
                                }
                        } else {
                                if (first_word(l, "Read")) {
                                        l += 4;
                                        q = &rd;
                                } else if (first_word(l, "Write")) {
                                        l += 5;
                                        q = &wr;
                                } else
                                        continue;

                                l += strspn(l, WHITESPACE);
                                r = safe_atou64(l, &k);
                                if (r < 0)
                                        continue;

                                *q += k;
                        }
                }

                timestamp = now_nsec(CLOCK_MONOTONIC);

                if (g->io_iteration == iteration - 1) {
                        uint64_t x, yr, yw;

                        x = (uint64_t) (timestamp - g->io_timestamp);
                        if (x < 1)
                                x = 1;

                        if (rd > g->io_input)
                                yr = rd - g->io_input;
                        else
                                yr = 0;

                        if (wr > g->io_output)
                                yw = wr - g->io_output;
                        else
                                yw = 0;

                        if (yr > 0 || yw > 0) {
                                g->io_input_bps = (yr * 1000000000ULL) / x;
                                g->io_output_bps = (yw * 1000000000ULL) / x;
                                g->io_valid = true;
                        }
                }

                g->io_input = rd;
                g->io_output = wr;
                g->io_timestamp = timestamp;
                g->io_iteration = iteration;
        }

        if (ret)
                *ret = g;

        return 0;
}
static int process(const char *controller, const char *path, Hashmap *a, Hashmap *b, unsigned iteration) {
        Group *g;
        int r;
        FILE *f = NULL;
        pid_t pid;
        unsigned n;

        assert(controller);
        assert(path);
        assert(a);

        g = hashmap_get(a, path);
        if (!g) {
                g = hashmap_get(b, path);
                if (!g) {
                        g = new0(Group, 1);
                        if (!g)
                                return -ENOMEM;

                        g->path = strdup(path);
                        if (!g->path) {
                                group_free(g);
                                return -ENOMEM;
                        }

                        r = hashmap_put(a, g->path, g);
                        if (r < 0) {
                                group_free(g);
                                return r;
                        }
                } else {
                        assert_se(hashmap_move_one(a, b, path) == 0);
                        g->cpu_valid = g->memory_valid = g->io_valid = g->n_tasks_valid = false;
                }
        }

        /* Regardless of which controller we are looking at, let's find
         * the maximum number of processes seen in any of them */
        r = cg_enumerate_processes(controller, path, &f);
        if (r < 0)
                return r;

        n = 0;
        while (cg_read_pid(f, &pid) > 0)
                n++;

        fclose(f);

        if (n > 0) {
                if (g->n_tasks_valid)
                        g->n_tasks = MAX(g->n_tasks, n);
                else
                        g->n_tasks = n;

                g->n_tasks_valid = true;
        }

        if (streq(controller, "cpuacct")) {
                uint64_t new_usage;
                char *p, *v;
                struct timespec ts;

                r = cg_get_path(controller, path, "cpuacct.usage", &p);
                if (r < 0)
                        return r;

                r = read_one_line_file(p, &v);
                free(p);

                if (r < 0)
                        return r;

                r = safe_atou64(v, &new_usage);
                free(v);

                if (r < 0)
                        return r;

                assert_se(clock_gettime(CLOCK_MONOTONIC, &ts) == 0);

                if (g->cpu_iteration == iteration - 1) {
                        uint64_t x, y;

                        x = ((uint64_t) ts.tv_sec * 1000000000ULL + (uint64_t) ts.tv_nsec) -
                                ((uint64_t) g->cpu_timestamp.tv_sec * 1000000000ULL + (uint64_t) g->cpu_timestamp.tv_nsec);

                        y = new_usage - g->cpu_usage;

                        if (y > 0) {
                                g->cpu_fraction = (double) y / (double) x;
                                g->cpu_valid = true;
                        }
                }

                g->cpu_usage = new_usage;
                g->cpu_timestamp = ts;
                g->cpu_iteration = iteration;

        } else if (streq(controller, "memory")) {
                char *p, *v;

                r = cg_get_path(controller, path, "memory.usage_in_bytes", &p);
                if (r < 0)
                        return r;

                r = read_one_line_file(p, &v);
                free(p);

                if (r < 0)
                        return r;

                r = safe_atou64(v, &g->memory);
                free(v);

                if (r < 0)
                        return r;

                if (g->memory > 0)
                        g->memory_valid = true;

        } else if (streq(controller, "blkio")) {
                char *p;
                uint64_t wr = 0, rd = 0;
                struct timespec ts;

                r = cg_get_path(controller, path, "blkio.io_service_bytes", &p);
                if (r < 0)
                        return r;

                f = fopen(p, "re");
                free(p);

                if (!f)
                        return -errno;

                for (;;) {
                        char line[LINE_MAX], *l;
                        uint64_t k, *q;

                        if (!fgets(line, sizeof(line), f))
                                break;

                        l = strstrip(line);
                        l += strcspn(l, WHITESPACE);
                        l += strspn(l, WHITESPACE);

                        if (first_word(l, "Read")) {
                                l += 4;
                                q = &rd;
                        } else if (first_word(l, "Write")) {
                                l += 5;
                                q = &wr;
                        } else
                                continue;

                        l += strspn(l, WHITESPACE);
                        r = safe_atou64(l, &k);
                        if (r < 0)
                                continue;

                        *q += k;
                }

                fclose(f);

                assert_se(clock_gettime(CLOCK_MONOTONIC, &ts) == 0);

                if (g->io_iteration == iteration - 1) {
                        uint64_t x, yr, yw;

                        x = ((uint64_t) ts.tv_sec * 1000000000ULL + (uint64_t) ts.tv_nsec) -
                                ((uint64_t) g->io_timestamp.tv_sec * 1000000000ULL + (uint64_t) g->io_timestamp.tv_nsec);

                        yr = rd - g->io_input;
                        yw = wr - g->io_output;

                        if (yr > 0 || yw > 0) {
                                g->io_input_bps = (yr * 1000000000ULL) / x;
                                g->io_output_bps = (yw * 1000000000ULL) / x;
                                g->io_valid = true;
                        }
                }

                g->io_input = rd;
                g->io_output = wr;
                g->io_timestamp = ts;
                g->io_iteration = iteration;
        }

        return 0;
}
int cg_migrate(const char *cfrom, const char *pfrom, const char *cto, const char *pto, bool ignore_self) {
        bool done = false;
        _cleanup_set_free_ Set *s = NULL;
        int r, ret = 0;
        pid_t my_pid;

        assert(cfrom);
        assert(pfrom);
        assert(cto);
        assert(pto);

        s = set_new(NULL);
        if (!s)
                return -ENOMEM;

        my_pid = getpid();

        do {
                _cleanup_fclose_ FILE *f = NULL;
                pid_t pid = 0;
                done = true;

                r = cg_enumerate_processes(cfrom, pfrom, &f);
                if (r < 0) {
                        if (ret >= 0 && r != -ENOENT)
                                return r;

                        return ret;
                }

                while ((r = cg_read_pid(f, &pid)) > 0) {

                        /* This might do weird stuff if we aren't a
                         * single-threaded program. However, we
                         * luckily know we are not */
                        if (ignore_self && pid == my_pid)
                                continue;

                        if (set_get(s, LONG_TO_PTR(pid)) == LONG_TO_PTR(pid))
                                continue;

                        r = cg_attach(cto, pto, pid);
                        if (r < 0) {
                                if (ret >= 0 && r != -ESRCH)
                                        ret = r;
                        } else if (ret == 0)
                                ret = 1;

                        done = false;

                        r = set_put(s, LONG_TO_PTR(pid));
                        if (r < 0) {
                                if (ret >= 0)
                                        return r;

                                return ret;
                        }
                }

                if (r < 0) {
                        if (ret >= 0)
                                return r;

                        return ret;
                }
        } while (!done);

        return ret;
}
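/* Hypothetical usage sketch, not part of the original code; the cgroup paths
 * are invented for illustration. Move every process found under one cgroup
 * into another within the same hierarchy, skipping the calling process. Per
 * the semantics above, the call returns > 0 if at least one process was
 * attached, 0 if none, and a negative errno-style value on error. */
static int example_move_service_processes(void) {
        return cg_migrate(SYSTEMD_CGROUP_CONTROLLER, "/system.slice/old.service",
                          SYSTEMD_CGROUP_CONTROLLER, "/system.slice/new.service",
                          true);
}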
int cg_kill(const char *controller, const char *path, int sig, bool sigcont, bool ignore_self, Set *s) {
        bool done = false;
        int r, ret = 0;
        pid_t my_pid;
        FILE *f = NULL;
        Set *allocated_set = NULL;

        assert(controller);
        assert(path);
        assert(sig >= 0);

        /* This goes through the tasks list and kills them all. This
         * is repeated until no further processes are added to the
         * tasks list, to properly handle forking processes */

        if (!s)
                if (!(s = allocated_set = set_new(trivial_hash_func, trivial_compare_func)))
                        return -ENOMEM;

        my_pid = getpid();

        do {
                pid_t pid = 0;
                done = true;

                if ((r = cg_enumerate_processes(controller, path, &f)) < 0) {
                        if (ret >= 0 && r != -ENOENT)
                                ret = r;

                        goto finish;
                }

                while ((r = cg_read_pid(f, &pid)) > 0) {

                        if (pid == my_pid && ignore_self)
                                continue;

                        if (set_get(s, LONG_TO_PTR(pid)) == LONG_TO_PTR(pid))
                                continue;

                        /* If we haven't killed this process yet, kill
                         * it */
                        if (kill(pid, sig) < 0) {
                                if (ret >= 0 && errno != ESRCH)
                                        ret = -errno;
                        } else if (ret == 0) {

                                if (sigcont)
                                        kill(pid, SIGCONT);

                                ret = 1;
                        }

                        done = false;

                        if ((r = set_put(s, LONG_TO_PTR(pid))) < 0) {
                                if (ret >= 0)
                                        ret = r;

                                goto finish;
                        }
                }

                if (r < 0) {
                        if (ret >= 0)
                                ret = r;

                        goto finish;
                }

                fclose(f);
                f = NULL;

                /* To avoid racing against processes which fork
                 * quicker than we can kill them we repeat this until
                 * no new pids need to be killed. */

        } while (!done);

finish:
        if (allocated_set)
                set_free(allocated_set);

        if (f)
                fclose(f);

        return ret;
}
int cg_kill(const char *controller, const char *path, int sig, bool sigcont, bool ignore_self, Set *s) {
        _cleanup_set_free_ Set *allocated_set = NULL;
        bool done = false;
        int r, ret = 0;
        pid_t my_pid;

        assert(sig >= 0);

        /* This goes through the tasks list and kills them all. This
         * is repeated until no further processes are added to the
         * tasks list, to properly handle forking processes */

        if (!s) {
                s = allocated_set = set_new(NULL);
                if (!s)
                        return -ENOMEM;
        }

        my_pid = getpid();

        do {
                _cleanup_fclose_ FILE *f = NULL;
                pid_t pid = 0;
                done = true;

                r = cg_enumerate_processes(controller, path, &f);
                if (r < 0) {
                        if (ret >= 0 && r != -ENOENT)
                                return r;

                        return ret;
                }

                while ((r = cg_read_pid(f, &pid)) > 0) {

                        if (ignore_self && pid == my_pid)
                                continue;

                        if (set_get(s, LONG_TO_PTR(pid)) == LONG_TO_PTR(pid))
                                continue;

                        /* If we haven't killed this process yet, kill
                         * it */
                        if (kill(pid, sig) < 0) {
                                if (ret >= 0 && errno != ESRCH)
                                        ret = -errno;
                        } else {
                                if (sigcont && sig != SIGKILL)
                                        kill(pid, SIGCONT);

                                if (ret == 0)
                                        ret = 1;
                        }

                        done = false;

                        r = set_put(s, LONG_TO_PTR(pid));
                        if (r < 0) {
                                if (ret >= 0)
                                        return r;

                                return ret;
                        }
                }

                if (r < 0) {
                        if (ret >= 0)
                                return r;

                        return ret;
                }

                /* To avoid racing against processes which fork
                 * quicker than we can kill them we repeat this until
                 * no new pids need to be killed. */

        } while (!done);

        return ret;
}
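/* Hypothetical usage sketch, not part of the original code; the cgroup path is
 * an invented example. Send SIGTERM to every process in a cgroup, also sending
 * SIGCONT so that stopped processes get a chance to handle the signal, and
 * skip the calling process itself. Passing a NULL set lets cg_kill() allocate
 * its own tracking set internally. Per the semantics above, the call returns
 * > 0 if at least one process was signalled, 0 if none, negative on error. */
static int example_terminate_cgroup(void) {
        return cg_kill(SYSTEMD_CGROUP_CONTROLLER, "/system.slice/example.service",
                       SIGTERM, true, true, NULL);
}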