/** * Tests the cgroup_attach_cgroup() api under different scenarios * @param retcode error code in case any error is expected from api * @param cgrp the group to assign the task to * @param group1 the name of the group under first (single) mountpoint * @param group2 the name of the group under 2nd moutpoint for multimount * @param i the test number * @param k the message enum number to print the useful message */ void test_cgroup_attach_task(int retcode, struct cgroup *cgrp, const char *group1, const char *group2, pid_t pid, int k, int i) { int retval; char tasksfile[FILENAME_MAX], tasksfile2[FILENAME_MAX]; /* Check, In case some error is expected due to a negative scenario */ if (retcode) { if (pid) retval = cgroup_attach_task_pid(cgrp, pid); else retval = cgroup_attach_task(cgrp); if (retval == retcode) message(i, PASS, "attach_task()", retval, info[k]); else message(i, FAIL, "attach_task()", retval, info[k]); return; } /* Now there is no error and it is a genuine call */ if (pid) retval = cgroup_attach_task_pid(cgrp, pid); else retval = cgroup_attach_task(cgrp); /* API returned success, so perform check */ if (retval == 0) { build_path(tasksfile, mountpoint, group1, "tasks"); if (check_task(tasksfile, 0)) { if (fs_mounted == 2) { /* multiple mounts */ build_path(tasksfile2, mountpoint2, group2, "tasks"); if (check_task(tasksfile2, 0)) { message(i, PASS, "attach_task()", retval, info[TASKINGRP]); } else { message(i, FAIL, "attach_task()", retval, info[TASKNOTINANYGRP]); } } else { /* single mount */ message(i, PASS, "attach_task()", retval, info[TASKINGRP]); } } else { message(i, FAIL, "attach_task()", retval, info[TASKNOTINGRP]); } } else { message(i, FAIL, "attach_task()", retval, (char *)"\n"); } }
/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	/*
	 * Hold cgroup_mutex and the threadgroup rwsem across the whole walk
	 * so the set of roots and the threadgroup stay stable while we
	 * migrate @tsk hierarchy by hierarchy.
	 */
	mutex_lock(&cgroup_mutex);
	percpu_down_write(&cgroup_threadgroup_rwsem);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		/* v2 (default hierarchy) is skipped; only v1 roots are walked. */
		if (root == &cgrp_dfl_root)
			continue;

		/* css_set_lock only guards the lookup, not the attach itself. */
		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		/*
		 * Stop at the first failure; earlier hierarchies are left
		 * attached (no rollback).
		 */
		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);

	/* 0 on success, first attach error otherwise. */
	return retval;
}
/*
 * Per-request log hook: re-attach the worker to the configured default
 * cgroup (relinquish mode). Always returns DECLINED so other log handlers
 * still run; cgroup errors are logged but never abort the request.
 */
static int cgroup_log_transaction(request_rec *r)
{
	cgroup *mygroup;
	int ret = 0;

	if (!cg_enabled) {
		return DECLINED;
	}

	cgroup_config *cgconf = ap_get_module_config(r->server->module_config,
						     &cgroup_module);

	if (cgconf->relinquish == ACTIVE_OFF) {
		return DECLINED;
	}

	if ((mygroup = cgroup_new_cgroup(cgconf->default_cgroup)) == NULL) {
		ap_log_rerror(APLOG_MARK, APLOG_ERR, errno, r,
			      "Cannot allocate CGroup %s resources: %s",
			      cgconf->default_cgroup, cgroup_strerror(ret));
		return DECLINED;
	}

	if ((ret = cgroup_get_cgroup(mygroup)) > 0) {
		ap_log_rerror(APLOG_MARK, APLOG_ERR, errno, r,
			      "Cannot get CGroup %s: %s",
			      cgconf->default_cgroup, cgroup_strerror(ret));
	} else if ((ret = cgroup_attach_task(mygroup)) > 0) {
		ap_log_rerror(APLOG_MARK, APLOG_ERR, errno, r,
			      "Cannot assign to CGroup %s: %s",
			      cgconf->default_cgroup, cgroup_strerror(ret));
	}

	/*
	 * BUG FIX: the cgroup handle was previously never released, leaking
	 * memory on every logged request. Free it on all paths.
	 */
	cgroup_free(&mygroup);

	return DECLINED;
}
/*
 * Common worker for cgroup v1 "tasks"/"cgroup.procs" writes: parse a pid
 * from @buf and attach that task (or, with @threadgroup, its whole thread
 * group) to the cgroup backing this kernfs file. Returns @nbytes on
 * success, a negative errno otherwise.
 */
static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
				     char *buf, size_t nbytes, loff_t off,
				     bool threadgroup)
{
	struct cgroup *cgrp;
	struct task_struct *task;
	const struct cred *cred, *tcred;
	ssize_t ret;

	/* Pin the cgroup; fails if it is being removed. */
	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	task = cgroup_procs_write_start(buf, threadgroup);
	ret = PTR_ERR_OR_ZERO(task);
	if (ret)
		goto out_unlock;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	cred = current_cred();
	tcred = get_task_cred(task);
	/* Allowed: root, or euid matching the target's uid or suid. */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;
	put_cred(tcred);
	if (ret)
		goto out_finish;

	ret = cgroup_attach_task(cgrp, task, threadgroup);

out_finish:
	/* Undo cgroup_procs_write_start() (drops the task reference). */
	cgroup_procs_write_finish(task);
out_unlock:
	cgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}
/*
 * Ruby-visible #attach method: attach the current process (no argument)
 * or an explicitly given pid to the cgroup stored in this object's
 * context, then re-wrap the context into the instance variable and
 * return self.
 *
 * NOTE(review): mrb_get_args() with format "|i" fills an mrb_int, but
 * &pid is an mrb_value — this looks like a type mismatch; confirm
 * against the mruby version this gem targets.
 * NOTE(review): the return values of cgroup_attach_task() and
 * cgroup_attach_task_pid() are ignored, so attach failures are silent;
 * consider raising an mruby exception on error.
 */
static mrb_value mrb_cgroup_attach(mrb_state *mrb, mrb_value self)
{
	mrb_cgroup_context *mrb_cg_cxt = mrb_cgroup_get_context(mrb, self, "mrb_cgroup_context");
	mrb_value pid = mrb_nil_value();
	mrb_get_args(mrb, "|i", &pid);

	if (mrb_nil_p(pid)) {
		cgroup_attach_task(mrb_cg_cxt->cg);
	} else {
		cgroup_attach_task_pid(mrb_cg_cxt->cg, mrb_fixnum(pid));
	}

	mrb_iv_set(mrb
		, self
		, mrb_intern_cstr(mrb, "mrb_cgroup_context")
		, mrb_obj_value(Data_Wrap_Struct(mrb
			, mrb->object_class
			, &mrb_cgroup_context_type
			, (void *)mrb_cg_cxt)
		)
	);

	return self;
}
/*
 * Child-process init hook: initialize libcgroup, look up the configured
 * default cgroup and attach this child to it. Sets cg_enabled on success;
 * on any failure the module simply stays disabled for this child.
 */
static void cgroup_child_init(apr_pool_t *pool, server_rec *server)
{
	cgroup *mygroup = NULL;
	int ret;

	cgroup_config *cgconf = ap_get_module_config(server->module_config,
						     &cgroup_module);

	if ((ret = cgroup_init()) > 0) {
		ap_log_error(APLOG_MARK, APLOG_ERR, errno, server,
			     "Could not initialize CGroups: %s",
			     cgroup_strerror(ret));
	} else if ((mygroup = cgroup_new_cgroup(cgconf->default_cgroup)) == NULL) {
		/*
		 * BUG FIX: this path used to print cgroup_strerror(ret) with
		 * a stale ret (0, i.e. "success"); allocation failure carries
		 * no libcgroup error code, so don't pretend it does.
		 */
		ap_log_error(APLOG_MARK, APLOG_ERR, errno, server,
			     "Cannot allocate CGroup %s resources",
			     cgconf->default_cgroup);
	} else if ((ret = cgroup_get_cgroup(mygroup)) > 0) {
		ap_log_error(APLOG_MARK, APLOG_ERR, errno, server,
			     "Cannot get CGroup %s: %s",
			     cgconf->default_cgroup, cgroup_strerror(ret));
	} else if ((ret = cgroup_attach_task(mygroup)) > 0) {
		ap_log_error(APLOG_MARK, APLOG_ERR, errno, server,
			     "Cannot assign to CGroup %s: %s",
			     cgconf->default_cgroup, cgroup_strerror(ret));
	} else {
		cg_enabled = 1;
	}

	/*
	 * BUG FIX: the handle was previously freed only on the success path,
	 * leaking it whenever cgroup_get_cgroup()/cgroup_attach_task() failed.
	 */
	if (mygroup != NULL)
		cgroup_free(&mygroup);
}
/*
 * Move the corosync process into the root cpu cgroup (so RT scheduling
 * is not throttled by a sub-cgroup's quota). Returns 0 when the process
 * is (or ends up) in the root cgroup, -1 otherwise. A no-op returning -1
 * when built without libcgroup.
 */
static int corosync_move_to_root_cgroup(void)
{
	int res = -1;

#ifdef HAVE_LIBCGROUP
	int cg_ret;
	struct cgroup *root_cgroup = NULL;
	struct cgroup_controller *root_cpu_cgroup_controller = NULL;
	char *current_cgroup_path = NULL;

	cg_ret = cgroup_init();
	if (cg_ret) {
		log_printf(LOGSYS_LEVEL_WARNING,
		    "Unable to initialize libcgroup: %s ", cgroup_strerror(cg_ret));
		goto exit_res;
	}

	/*
	 * BUG FIX: "&current_cgroup_path" had been mangled into the HTML
	 * entity sequence "¤t_cgroup_path" (from "&curren"); restored here
	 * and below.
	 */
	cg_ret = cgroup_get_current_controller_path(getpid(), "cpu",
	    &current_cgroup_path);
	if (cg_ret) {
		log_printf(LOGSYS_LEVEL_WARNING,
		    "Unable to get current cpu cgroup path: %s ", cgroup_strerror(cg_ret));
		goto exit_res;
	}

	if (strcmp(current_cgroup_path, "/") == 0) {
		log_printf(LOGSYS_LEVEL_DEBUG,
		    "Corosync is already in root cgroup path");
		res = 0;
		goto exit_res;
	}

	root_cgroup = cgroup_new_cgroup("/");
	if (root_cgroup == NULL) {
		log_printf(LOGSYS_LEVEL_WARNING, "Can't create root cgroup");
		goto exit_res;
	}

	/* The controller is owned by root_cgroup and freed with it. */
	root_cpu_cgroup_controller = cgroup_add_controller(root_cgroup, "cpu");
	if (root_cpu_cgroup_controller == NULL) {
		log_printf(LOGSYS_LEVEL_WARNING,
		    "Can't create root cgroup cpu controller");
		goto exit_res;
	}

	cg_ret = cgroup_attach_task(root_cgroup);
	if (cg_ret) {
		log_printf(LOGSYS_LEVEL_WARNING,
		    "Can't attach task to root cgroup: %s ", cgroup_strerror(cg_ret));
		goto exit_res;
	}

	/*
	 * BUG FIX: the first path (strdup'd by libcgroup) was leaked when it
	 * was re-queried; release it before fetching the path again.
	 */
	free(current_cgroup_path);
	current_cgroup_path = NULL;

	/* Re-read the path to verify the attach actually took effect. */
	cg_ret = cgroup_get_current_controller_path(getpid(), "cpu",
	    &current_cgroup_path);
	if (cg_ret) {
		log_printf(LOGSYS_LEVEL_WARNING,
		    "Unable to get current cpu cgroup path: %s ", cgroup_strerror(cg_ret));
		goto exit_res;
	}

	if (strcmp(current_cgroup_path, "/") == 0) {
		log_printf(LOGSYS_LEVEL_NOTICE,
		    "Corosync successfully moved to root cgroup");
		res = 0;
	} else {
		log_printf(LOGSYS_LEVEL_WARNING,
		    "Can't move Corosync to root cgroup");
	}

exit_res:
	/* BUG FIX: the controller path was never freed (free(NULL) is a no-op). */
	free(current_cgroup_path);

	if (root_cgroup != NULL) {
		cgroup_free(&root_cgroup);
	}

	/*
	 * libcgroup doesn't define something like cgroup_fini so there is no way how to clean
	 * it's cache. It has to be called when libcgroup authors decide to implement it.
	 */
#endif

	return (res);
}