/*
 * fairsched_nodemask - set the allowed memory-node mask of a fairsched group.
 *
 * Looks up the cgroup named after @id under fairsched_root, copies a
 * nodemask of @len bytes from @user_mask_ptr, intersects it with the
 * nodes that have high memory, and applies the result to the cgroup.
 *
 * Returns 0 on success or a negative errno (-EPERM, -EINVAL, -ENOENT,
 * or whatever the copy/set helpers report).
 */
SYSCALL_DEFINE3(fairsched_nodemask, unsigned int, id, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	char name[16];
	struct cgroup *cgrp;
	nodemask_t requested, effective;
	int err;

	/* Only veid-capable callers may change node masks. */
	if (!capable_setveid())
		return -EPERM;

	/* id 0 is rejected outright. */
	if (id == 0)
		return -EINVAL;

	fairsched_name(name, sizeof(name), id);
	cgrp = cgroup_kernel_open(fairsched_root, 0, name);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);
	if (cgrp == NULL)
		return -ENOENT;

	err = get_user_node_mask(user_mask_ptr, len, &requested);
	if (err == 0) {
		/* Restrict the request to nodes that actually have memory. */
		nodes_and(effective, requested, node_states[N_HIGH_MEMORY]);
		cgroup_lock();
		err = cgroup_set_nodemask(cgrp, &effective);
		cgroup_unlock();
	}

	cgroup_kernel_close(cgrp);
	return err;
}
/*
 * dom0_max_vcpus - compute the number of vCPUs to give dom0.
 *
 * Builds dom0_nodes from the PXM ids given on the command line,
 * derives dom0_cpus from those nodes (intersected with cpupool0's
 * valid CPUs, falling back to the full sets when empty), and clamps
 * the resulting CPU count between opt_dom0_max_vcpus_min and
 * opt_dom0_max_vcpus_max, then against the PVH/PV architectural cap.
 */
unsigned int __init dom0_max_vcpus(void)
{
    unsigned int i, vcpus, cap;
    nodeid_t node;

    /* Translate the requested proximity domains into NUMA nodes. */
    for ( i = 0; i < dom0_nr_pxms; ++i )
    {
        node = pxm_to_node(dom0_pxms[i]);
        if ( node != NUMA_NO_NODE )
            node_set(node, dom0_nodes);
    }

    /* Drop offline nodes; if nothing is left, use all online nodes. */
    nodes_and(dom0_nodes, dom0_nodes, node_online_map);
    if ( nodes_empty(dom0_nodes) )
        dom0_nodes = node_online_map;

    /* Collect the CPUs of the chosen nodes, limited to cpupool0. */
    for_each_node_mask ( node, dom0_nodes )
        cpumask_or(&dom0_cpus, &dom0_cpus, &node_to_cpumask(node));
    cpumask_and(&dom0_cpus, &dom0_cpus, cpupool0->cpu_valid);
    if ( cpumask_empty(&dom0_cpus) )
        cpumask_copy(&dom0_cpus, cpupool0->cpu_valid);

    /* Clamp the CPU count to the configured min/max ... */
    vcpus = cpumask_weight(&dom0_cpus);
    if ( vcpus < opt_dom0_max_vcpus_min )
        vcpus = opt_dom0_max_vcpus_min;
    if ( vcpus > opt_dom0_max_vcpus_max )
        vcpus = opt_dom0_max_vcpus_max;

    /* ... and finally to the architectural limit for the dom0 type. */
    cap = dom0_pvh ? HVM_MAX_VCPUS : MAX_VIRT_CPUS;
    if ( vcpus > cap )
        vcpus = cap;

    return vcpus;
}