/*
 * Attach the calling task as a tracer of @task.
 *
 * Returns 0 on success, or a negative errno (-EPERM for a forbidden
 * target, or whatever may_attach() reports).  On success the target is
 * marked PT_PTRACED, linked to current as its tracer, and sent SIGSTOP.
 */
int ptrace_attach(struct task_struct *task)
{
	int retval;

	retval = -EPERM;
	/* Never trace pid 0 (idle) or pid 1 (init). */
	if (task->pid <= 1)
		goto out;
	/* Refuse to attach to a thread in our own thread group. */
	if (task->tgid == current->tgid)
		goto out;

repeat:
	/*
	 * Nasty, nasty.
	 *
	 * We want to hold both the task-lock and the
	 * tasklist_lock for writing at the same time.
	 * But that's against the rules (tasklist_lock
	 * is taken for reading by interrupts on other
	 * cpu's that may have task_lock).
	 */
	task_lock(task);
	local_irq_disable();
	if (!write_trylock(&tasklist_lock)) {
		/*
		 * Couldn't get tasklist_lock without risking deadlock:
		 * drop everything, spin until the lock looks takeable,
		 * then retry the whole acquisition sequence.
		 */
		local_irq_enable();
		task_unlock(task);
		do {
			cpu_relax();
		} while (!write_can_lock(&tasklist_lock));
		goto repeat;
	}

	/* the same process cannot be attached many times */
	if (task->ptrace & PT_PTRACED)
		goto bad;
	retval = may_attach(task);
	if (retval)
		goto bad;

	/* Go */
	/* PT_ATTACHED records that the tracer is not the real parent. */
	task->ptrace |= PT_PTRACED | ((task->real_parent != current)
				      ? PT_ATTACHED : 0);
	if (capable(CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;

	__ptrace_link(task, current);

	/* Stop the freshly-attached tracee. */
	force_sig_specific(SIGSTOP, task);

bad:
	/* NOTE: the success path also exits here; retval is 0 then. */
	write_unlock_irq(&tasklist_lock);
	task_unlock(task);
out:
	return retval;
}
/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	/* Opportunistic early-out: any blocking holder means we fail. */
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;
	if (!write_trylock(&eb->lock))
		return 0;
	/*
	 * Re-check under the lock: a holder may have transitioned to
	 * blocking mode between the first check and the trylock.
	 */
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	/* Success: account ourselves as a spinning writer and record owner. */
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}