Example #1
/** Deallocate the memory that was allocated by rwlock_initialize(). */
static void rwlock_cleanup(struct rwlock_info* p)
{
  struct rwlock_thread_info* q;

  tl_assert(p);

  if (s_trace_rwlock)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] rwlock_destroy   0x%lx",
                 VG_(get_running_tid)(),
                 thread_get_running_tid(),
                 p->a1);
  }

  if (rwlock_is_locked(p))
  {
    RwlockErrInfo REI = { p->a1 };
    VG_(maybe_record_error)(VG_(get_running_tid)(),
                            RwlockErr,
                            VG_(get_IP)(VG_(get_running_tid)()),
                            "Destroying locked rwlock",
                            &REI);
  }

  VG_(OSetGen_ResetIter)(p->thread_info);
  for ( ; (q = VG_(OSetGen_Next)(p->thread_info)); )
  {
    vc_cleanup(&q->vc);
  }
  VG_(OSetGen_Destroy)(p->thread_info);
}
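The same check-before-destroy idea can be reproduced in portable userspace code. A minimal sketch, assuming POSIX threads; rwlock_cleanup_checked is an illustrative name, and probing with pthread_rwlock_trywrlock stands in for rwlock_is_locked() (this is not Valgrind/DRD code):

/* Sketch (assumption): refuse to destroy a pthread rwlock that is still held,
 * in the spirit of the rwlock_is_locked() check above. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static int rwlock_cleanup_checked(pthread_rwlock_t *rw)
{
    /* If the write-trylock fails with EBUSY, some thread still holds the lock. */
    int rc = pthread_rwlock_trywrlock(rw);
    if (rc == EBUSY) {
        fprintf(stderr, "destroying locked rwlock\n");
        return -1;                      /* caller decides how to recover */
    }
    if (rc != 0)
        return rc;                      /* some other error */
    pthread_rwlock_unlock(rw);          /* we only probed; release immediately */
    return pthread_rwlock_destroy(rw);
}

int main(void)
{
    pthread_rwlock_t rw;
    pthread_rwlock_init(&rw, NULL);
    pthread_rwlock_rdlock(&rw);
    rwlock_cleanup_checked(&rw);        /* reports: lock is still held */
    pthread_rwlock_unlock(&rw);
    return rwlock_cleanup_checked(&rw); /* now destroys cleanly and returns 0 */
}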
Example #2
static inline void __preempt_write_lock(rwlock_t *lock)
{
	if (preempt_count() > 1) {
		_raw_write_lock(lock);
		return;
	}

	do {
		preempt_enable();
		while (rwlock_is_locked(lock))
			cpu_relax();
		preempt_disable();
	} while (!_raw_write_trylock(lock));
}
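The loop above enables preemption while the lock is busy, spins on a plain read until rwlock_is_locked() reports the lock free, then re-disables preemption and races for the lock with _raw_write_trylock(). The same test-and-test-and-set shape can be written with C11 atomics; a minimal sketch, assuming an illustrative spin_t type (not the kernel's lock types):

/* Sketch (assumption): the test-and-test-and-set pattern of __preempt_write_lock(),
 * expressed with C11 atomics instead of kernel rwlocks and preemption control. */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_bool locked; } spin_t;   /* illustrative, not kernel code */

static bool spin_trylock(spin_t *s)
{
    /* Succeeds only if the flag was previously clear. */
    return !atomic_exchange_explicit(&s->locked, true, memory_order_acquire);
}

static void spin_lock(spin_t *s)
{
    do {
        /* Read-only wait, analogous to the rwlock_is_locked() loop: avoid
         * hammering the cache line with atomic writes while the lock is held. */
        while (atomic_load_explicit(&s->locked, memory_order_relaxed))
            ;                            /* cpu_relax() goes here in kernel code */
    } while (!spin_trylock(s));
}

static void spin_unlock(spin_t *s)
{
    atomic_store_explicit(&s->locked, false, memory_order_release);
}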
Example #3
int ptrace_attach(struct task_struct *task)
{
	int retval;

	retval = -EPERM;
	if (task->pid <= 1)
		goto out;
	if (task == current)
		goto out;

repeat:
	/*
	 * Nasty, nasty.
	 *
	 * We want to hold both the task-lock and the
	 * tasklist_lock for writing at the same time.
	 * But that's against the rules (tasklist_lock
	 * is taken for reading by interrupts on other
	 * cpu's that may have task_lock).
	 */
	task_lock(task);
	local_irq_disable();
	if (!write_trylock(&tasklist_lock)) {
		local_irq_enable();
		task_unlock(task);
		do {
			cpu_relax();
		} while (rwlock_is_locked(&tasklist_lock));
		goto repeat;
	}

	if (!task->mm)
		goto bad;
	if (((current->uid != task->euid) ||
	    (current->uid != task->suid) ||
	    (current->uid != task->uid) ||
	    (current->gid != task->egid) ||
	    (current->gid != task->sgid) ||
	    (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
		goto bad;
	rmb();
	if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
		goto bad;
	/* the same process cannot be attached many times */
	if (task->ptrace & PT_PTRACED)
		goto bad;
	retval = security_ptrace(current, task);
	if (retval)
		goto bad;

	/* Go */
	task->ptrace |= PT_PTRACED | ((task->real_parent != current)
				      ? PT_ATTACHED : 0);
	if (capable(CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;

	__ptrace_link(task, current);

	force_sig_specific(SIGSTOP, task);

bad:
	write_unlock_irq(&tasklist_lock);
	task_unlock(task);
out:
	return retval;
}
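The locking dance at the top of ptrace_attach() (take task_lock(), attempt tasklist_lock with a trylock, and drop everything and spin if that fails) is a standard way to avoid deadlock when two locks cannot both be taken unconditionally. A minimal sketch of the same back-off-and-retry pattern, assuming two pthread mutexes; lock_both, a and b are illustrative names, not kernel APIs:

/* Sketch (assumption): trylock-and-back-off acquisition of two locks, in the
 * spirit of the task_lock()/write_trylock(&tasklist_lock) sequence above. */
#include <pthread.h>
#include <sched.h>

static void lock_both(pthread_mutex_t *a, pthread_mutex_t *b)
{
    for (;;) {
        pthread_mutex_lock(a);
        if (pthread_mutex_trylock(b) == 0)
            return;                 /* got both locks */
        /* Could not get b: release a as well so whoever holds b (or, in the
         * kernel case, an interrupt taking tasklist_lock for reading) can make
         * progress, then yield before retrying. */
        pthread_mutex_unlock(a);
        sched_yield();              /* stand-in for the cpu_relax() spin */
    }
}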