static int loadavg_proc_show(struct seq_file *m, void *v)
{
	unsigned long avnrun[3], nr_runnable = 0;
	struct cpumask cpus_allowed;
	int i;

	rcu_read_lock();
	if (task_in_nonroot_cpuacct(current) &&
	    in_noninit_pid_ns(current->nsproxy->pid_ns)) {
		/* Inside a container: report the cpuacct group's load averages. */
		get_avenrun_from_tsk(current, avnrun, FIXED_1/200, 0);

		/* Restrict the CPU walk to the task's cpuset, if it has one. */
		cpumask_copy(&cpus_allowed, cpu_possible_mask);
		if (task_subsys_state(current, cpuset_subsys_id)) {
			memset(&cpus_allowed, 0, sizeof(cpus_allowed));
			get_tsk_cpu_allowed(current, &cpus_allowed);
		}

		/* Sum the group's runnable tasks across the allowed CPUs. */
		for_each_cpu_and(i, cpu_possible_mask, &cpus_allowed)
			nr_runnable += task_ca_running(current, i);
	} else {
		/* Root cgroup or init pid namespace: use the global numbers. */
		get_avenrun(avnrun, FIXED_1/200, 0);
		nr_runnable = nr_running();
	}
	rcu_read_unlock();

	seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %lu/%d %d\n",
		   LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
		   LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
		   LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]),
		   nr_runnable, nr_threads,
		   task_active_pid_ns(current)->last_pid);
	return 0;
}
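The avenrun samples are fixed-point values, which is why the seq_printf above goes through LOAD_INT()/LOAD_FRAC() to produce the familiar "X.YY" output. A minimal standalone sketch of that conversion, with the macros copied to match the kernel's definitions and a made-up sample value:

#include <stdio.h>

#define FSHIFT		11			/* bits of fractional precision */
#define FIXED_1		(1 << FSHIFT)		/* 1.0 in fixed point */
#define LOAD_INT(x)	((x) >> FSHIFT)
#define LOAD_FRAC(x)	LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

int main(void)
{
	/* Hypothetical raw average: 3.25 in fixed point. */
	unsigned long sample = 3 * FIXED_1 + FIXED_1 / 4;

	printf("%lu.%02lu\n", LOAD_INT(sample), LOAD_FRAC(sample));	/* prints 3.25 */
	return 0;
}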
struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
{
	/*
	 * Prefer the cgroup the bio was explicitly associated with;
	 * otherwise fall back to the submitting task's cgroup.
	 */
	struct cgroup_subsys_state *css = bio->bi_css
		? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
		: task_subsys_state(current, bcache_subsys_id);

	return css ? container_of(css, struct bch_cgroup, css)
		   : &bcache_default_cgroup;
}
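The container_of() step works because bch_cgroup embeds its cgroup_subsys_state as a member, so the enclosing struct can be recovered from a pointer to that member. A minimal userspace sketch of the same pattern, with hypothetical my_cgroup/demo names and a local copy of the macro:

#include <stddef.h>
#include <stdio.h>

/* Local stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct css {			/* stand-in for cgroup_subsys_state */
	int refcnt;
};

struct my_cgroup {		/* stand-in for bch_cgroup: css embedded inside */
	int cache_mode;
	struct css css;
};

int main(void)
{
	struct my_cgroup demo = { .cache_mode = 1 };
	struct css *inner = &demo.css;

	/* Recover the enclosing struct from a pointer to its member. */
	struct my_cgroup *outer = container_of(inner, struct my_cgroup, css);

	printf("cache_mode = %d\n", outer->cache_mode);	/* prints 1 */
	return 0;
}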