/*
 * sys_fairsched_nodemask - bind fairsched group @id to a memory-node mask.
 *
 * The user-supplied mask of @len bytes is intersected with the set of
 * nodes that actually have high memory before being applied to the
 * group's cgroup.  Returns 0 on success or a negative errno.
 */
SYSCALL_DEFINE3(fairsched_nodemask, unsigned int, id, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	struct cgroup *cgrp;
	char name[16];
	nodemask_t in_mask, new_mask;
	int err;

	if (!capable_setveid())
		return -EPERM;
	/* id 0 is the root group and may not be re-bound */
	if (id == 0)
		return -EINVAL;

	fairsched_name(name, sizeof(name), id);
	cgrp = cgroup_kernel_open(fairsched_root, 0, name);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);
	if (cgrp == NULL)
		return -ENOENT;

	err = get_user_node_mask(user_mask_ptr, len, &in_mask);
	if (!err) {
		/* restrict to nodes that actually have memory */
		nodes_and(new_mask, in_mask, node_states[N_HIGH_MEMORY]);
		cgroup_lock();
		err = cgroup_set_nodemask(cgrp, &new_mask);
		cgroup_unlock();
	}

	cgroup_kernel_close(cgrp);
	return err;
}
/*
 * taskcount_read - cftype read handler reporting the number of tasks
 * attached to cgroup @cont.
 *
 * Takes the cgroup lock around cgroup_task_count() and returns the
 * count as a u64.  @cft is unused.
 */
static u64 taskcount_read(struct cgroup *cont, struct cftype *cft)
{
	u64 count;	/* was misspelled "conut", leaving "count" undeclared */

	cgroup_lock();
	count = cgroup_task_count(cont);
	cgroup_unlock();
	return count;
}
/*
 * sys_fairsched_cpumask - bind fairsched group @id to a CPU mask.
 *
 * The user-supplied mask of @len bytes is intersected with the set of
 * active CPUs before being applied to the group's cgroup.  Returns 0
 * on success or a negative errno.
 */
SYSCALL_DEFINE3(fairsched_cpumask, unsigned int, id, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	cpumask_var_t in_mask, new_mask;
	struct cgroup *cgrp;
	int err;

	if (!capable_setveid())
		return -EPERM;
	/* id 0 is the root group and may not be re-bound */
	if (id == 0)
		return -EINVAL;

	cgrp = fairsched_open(id);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	err = -ENOMEM;
	if (!alloc_cpumask_var(&in_mask, GFP_KERNEL))
		goto close;
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		goto free_in;

	err = get_user_cpu_mask(user_mask_ptr, len, in_mask);
	if (!err) {
		/* only currently active CPUs may be assigned */
		cpumask_and(new_mask, in_mask, cpu_active_mask);
		cgroup_lock();
		err = cgroup_set_cpumask(cgrp, new_mask);
		cgroup_unlock();
	}

	free_cpumask_var(new_mask);
free_in:
	free_cpumask_var(in_mask);
close:
	cgroup_kernel_close(cgrp);
	return err;
}