/* Apply functor F to every smthread in the system.
 * Wraps F in a SelectSmthreadsFunc adapter so the generic
 * for_each_thread() visitor invokes F only on threads that are
 * smthreads (filtering is done by the adapter, not here). */
void smthread_t::for_each_smthread(SmthreadFunc& f)
{
    SelectSmthreadsFunc g(f);
    for_each_thread(g);
}
/*
 * Move task @p's whole thread group to autogroup @ag.
 *
 * Serialized by ->siglock (taken via lock_task_sighand()); publishes the
 * new group pointer, migrates every thread with sched_move_task(), then
 * drops the reference held on the previous autogroup.
 */
static void autogroup_move_group(struct task_struct *p, struct autogroup *ag)
{
	struct autogroup *prev;
	struct task_struct *t;
	unsigned long flags;

	BUG_ON(!lock_task_sighand(p, &flags));

	prev = p->signal->autogroup;
	if (prev == ag) {
		/* Already in the target autogroup -- nothing to do. */
		unlock_task_sighand(p, &flags);
		return;
	}

	p->signal->autogroup = autogroup_kref_get(ag);
	/*
	 * We can't avoid sched_move_task() after we changed signal->autogroup,
	 * this process can already run with task_group() == prev->tg or we can
	 * race with cgroup code which can read autogroup = prev under rq->lock.
	 * In the latter case for_each_thread() can not miss a migrating thread,
	 * cpu_cgroup_attach() must not be possible after cgroup_exit() and it
	 * can't be removed from thread list, we hold ->siglock.
	 *
	 * If an exiting thread was already removed from thread list we rely on
	 * sched_autogroup_exit_task().
	 */
	for_each_thread(p, t)
		sched_move_task(t);

	unlock_task_sighand(p, &flags);
	autogroup_kref_put(prev);
}
/*
 * Resume the stopped process @pid with PTRACE_CONT, then resume each of
 * its threads via the thread_cont() callback.  Aborts through fatal()
 * (which does not return) if the ptrace call itself fails.
 */
static void proc_cont(uint32_t pid)
{
	if (ptrace(PTRACE_CONT, pid, 0, 0) == -1)
		/* __func__ is the C99-standard spelling; the original
		 * __FUNCTION__ is a GCC-only alias for it. */
		fatal("%s:%d ptrace (%s)", __func__, __LINE__, strerror(errno));
	for_each_thread(pid, thread_cont);
}
unsigned long read_count(void) { int t; unsigned long sum = 0; for_each_thread(t) sum += atomic_read(&per_thread(counter, t)); return sum; }
int memblocks_available(void) { int i; int sum = globalmem.cur + 1; for_each_thread(i) sum += per_thread(perthreadmem, i).cur + 1; return sum; }
/* Release all per-inferior state: free every thread object, empty the
   global thread list, discard the loaded-DLL list, and reset the
   current-thread pointer.  Order matters: threads are freed before the
   list holding them is cleared.  */
void clear_inferiors (void)
{
  for_each_thread (free_one_thread);
  all_threads.clear ();
  clear_dlls ();
  current_thread = NULL;
}
/*
 * Stop process @pid and all of its threads with SIGSTOP, serialized by
 * the shared-memory semaphore.  Best effort: if the semaphore wait
 * fails the function silently returns; if signalling the group leader
 * fails, the per-thread stops are skipped.
 */
static void proc_stop(uint32_t pid)
{
	if (TEMP_FAILURE_RETRY(sem_wait(&shm->sem)) != 0)
		return;

	if (thread_signal(pid, pid, SIGSTOP) != -1)
		for_each_thread(pid, thread_stop);

	sem_post(&shm->sem);
}
/*
 * Eventually-consistent reader thread: repeatedly sums the per-thread
 * counters and publishes the result into global_count, sleeping ~1 ms
 * between scans.  Participates in a staged shutdown: once stopflag
 * becomes nonzero it advances it each pass, and the loop exits when
 * stopflag reaches 3.
 */
void *eventual(void *arg)
{
	int t;
	int sum;

	while (stopflag < 3) {
		sum = 0;
		for_each_thread(t)
			sum += ACCESS_ONCE(per_thread(counter, t));
		ACCESS_ONCE(global_count) = sum;
		poll(NULL, 0, 1); /* ~1 ms delay between scans. */
		if (stopflag) {
			/* Full barrier before bumping the handshake so the
			 * just-published sum is ordered ahead of the
			 * stopflag increment. */
			smp_mb();
			stopflag++;
		}
	}
	return NULL;
}
/* Remove hardware break-/watchpoint.  Returns 0 on success, -1 if the
   point kind is unsupported or no matching slot was found.  */
static int
arm_remove_point (enum raw_bkpt_type type, CORE_ADDR addr, int len,
		  struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();
  struct arm_linux_hw_breakpoint p, *pts;
  int watch, i, count;

  /* Translate the request into a hardware-point descriptor; WATCH is
     nonzero for watchpoints, negative on failure.  */
  watch = arm_linux_hw_point_initialize (type, addr, len, &p);
  if (watch < 0)
    {
      /* Unsupported.  */
      return -1;
    }

  /* Pick the per-process slot table matching the point kind.  */
  if (watch)
    {
      count = arm_linux_get_hw_watchpoint_count ();
      pts = proc->priv->arch_private->wpts;
    }
  else
    {
      count = arm_linux_get_hw_breakpoint_count ();
      pts = proc->priv->arch_private->bpts;
    }

  /* Find the matching slot, disable its control register, and push the
     change to the registers of every thread.  */
  for (i = 0; i < count; i++)
    if (arm_linux_hw_breakpoint_equal (&p, pts + i))
      {
	pts[i].control = arm_hwbp_control_disable (pts[i].control);
	/* Only update the threads of the current process.  */
	for_each_thread (current_thread->id.pid (),
			 [&] (thread_info *thread)
			 {
			   update_registers_callback (thread, watch, i);
			 });
	return 0;
      }

  /* No watchpoint matched.  */
  return -1;
}