Example #1
File: pthread_kill.c  Project: kraj/glibc
int
__pthread_kill (pthread_t threadid, int signo)
{
  struct pthread *pd = (struct pthread *) threadid;

  /* Make sure the descriptor is valid.  */
  if (DEBUGGING_P && INVALID_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Force load of pd->tid into local variable or register.  Otherwise
     if a thread exits between ESRCH test and tgkill, we might return
     EINVAL, because pd->tid would be cleared by the kernel.  */
  pid_t tid = atomic_forced_read (pd->tid);
  if (__glibc_unlikely (tid <= 0))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Disallow sending the signal we use for cancellation, timers,
     for the setxid implementation.  */
  if (signo == SIGCANCEL || signo == SIGTIMER || signo == SIGSETXID)
    return EINVAL;

  /* We have a special syscall to do the work.  */
  INTERNAL_SYSCALL_DECL (err);

  pid_t pid = __getpid ();

  int val = INTERNAL_SYSCALL_CALL (tgkill, err, pid, tid, signo);
  return (INTERNAL_SYSCALL_ERROR_P (val, err)
	  ? INTERNAL_SYSCALL_ERRNO (val, err) : 0);
}
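
A minimal caller-side sketch (not part of the glibc source above) of how pthread_kill is typically used. The worker function, signal choice, and overall structure are illustrative assumptions; the signal is blocked before the thread is created so sigwait can collect it without a race.

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static void *worker (void *arg)
{
  sigset_t *set = arg;
  int sig;
  sigwait (set, &sig);            /* Block until the signal is delivered to this thread.  */
  printf ("worker received signal %d\n", sig);
  return NULL;
}

int main (void)
{
  sigset_t set;
  sigemptyset (&set);
  sigaddset (&set, SIGUSR1);
  pthread_sigmask (SIG_BLOCK, &set, NULL);   /* Block it so sigwait can collect it.  */

  pthread_t t;
  pthread_create (&t, NULL, worker, &set);

  int err = pthread_kill (t, SIGUSR1);       /* Returns an errno value, not -1/errno.  */
  if (err != 0)
    fprintf (stderr, "pthread_kill: %d\n", err);

  pthread_join (t, NULL);
  return 0;
}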
Example #2
int
pthread_setname_np (pthread_t th, const char *name)
{
  const struct pthread *pd = (const struct pthread *) th;

  if (INVALID_TD_P (pd))
    return ESRCH;

  return ENOSYS;
}
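
The variant above is the stub for targets without kernel support, so any valid handle gets ENOSYS. On Linux the GNU extension does work; a minimal sketch of the caller side, assuming _GNU_SOURCE and a name that fits Linux's 16-byte limit (including the terminating NUL):

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <string.h>

int main (void)
{
  /* Linux limits thread names to 16 bytes including the terminating NUL.  */
  int err = pthread_setname_np (pthread_self (), "worker-0");
  if (err != 0)
    fprintf (stderr, "pthread_setname_np: %s\n", strerror (err));

  char name[16];
  if (pthread_getname_np (pthread_self (), name, sizeof name) == 0)
    printf ("thread name: %s\n", name);
  return 0;
}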
Example #3
int
pthread_sigqueue (
     pthread_t threadid,
     int signo,
     const union sigval value)
{
#ifdef __NR_rt_tgsigqueueinfo
  struct pthread *pd = (struct pthread *) threadid;

  /* Make sure the descriptor is valid.  */
  if (DEBUGGING_P && INVALID_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Force load of pd->tid into local variable or register.  Otherwise
     if a thread exits between ESRCH test and tgkill, we might return
     EINVAL, because pd->tid would be cleared by the kernel.  */
  pid_t tid = atomic_forced_read (pd->tid);
  if (__builtin_expect (tid <= 0, 0))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Disallow sending the signal we use for cancellation, timers,
     for the setxid implementation.  */
  if (signo == SIGCANCEL || signo == SIGTIMER || signo == SIGSETXID)
    return EINVAL;

  /* Set up the siginfo_t structure.  */
  siginfo_t info;
  memset (&info, '\0', sizeof (siginfo_t));
  info.si_signo = signo;
  info.si_code = SI_QUEUE;
  info.si_pid = THREAD_GETMEM (THREAD_SELF, pid);
  info.si_uid = getuid ();
  info.si_value = value;

  /* We have a special syscall to do the work.  */
  INTERNAL_SYSCALL_DECL (err);

  /* One comment: The PID field in the TCB can temporarily be changed
     (in fork).  But this must not affect this code here.  Since this
     function would have to be called while the thread is executing
     fork, it would have to happen in a signal handler.  But this is
     not allowed, pthread_sigqueue is not guaranteed to be async-safe.  */
  int val = INTERNAL_SYSCALL (rt_tgsigqueueinfo, err, 4,
			      THREAD_GETMEM (THREAD_SELF, pid),
			      tid, signo, &info);

  return (INTERNAL_SYSCALL_ERROR_P (val, err)
	  ? INTERNAL_SYSCALL_ERRNO (val, err) : 0);
#else
  return ENOSYS;
#endif
}
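
A minimal caller-side sketch of pthread_sigqueue (not from the glibc tree; the real-time signal choice and payload are illustrative). The receiving thread fetches the queued value with sigwaitinfo, which is how the si_value filled in above reaches user code.

#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static void *worker (void *arg)
{
  sigset_t *set = arg;
  siginfo_t info;
  sigwaitinfo (set, &info);                 /* Blocks until the queued signal arrives.  */
  printf ("payload: %d\n", info.si_value.sival_int);
  return NULL;
}

int main (void)
{
  sigset_t set;
  sigemptyset (&set);
  sigaddset (&set, SIGRTMIN);
  pthread_sigmask (SIG_BLOCK, &set, NULL);  /* Block before creating the thread.  */

  pthread_t t;
  pthread_create (&t, NULL, worker, &set);

  union sigval val = { .sival_int = 42 };
  int err = pthread_sigqueue (t, SIGRTMIN, val);   /* Returns an errno value on failure.  */
  if (err != 0)
    fprintf (stderr, "pthread_sigqueue: %d\n", err);

  pthread_join (t, NULL);
  return 0;
}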
Example #4
int
pthread_setaffinity_np (pthread_t th,
                        size_t cpusetsize, const cpu_set_t *cpuset)
{
  const struct pthread *pd = (const struct pthread *) th;

  if (INVALID_TD_P (pd))
    return ESRCH;

  return ENOSYS;
}
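
Again a stub that reports ENOSYS for every valid handle. On Linux the call is implemented; a minimal sketch pinning the calling thread to CPU 0 (assumes _GNU_SOURCE and that CPU 0 exists on the machine):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main (void)
{
  cpu_set_t set;
  CPU_ZERO (&set);
  CPU_SET (0, &set);                        /* Restrict to CPU 0 only.  */

  int err = pthread_setaffinity_np (pthread_self (), sizeof set, &set);
  if (err != 0)
    fprintf (stderr, "pthread_setaffinity_np: %s\n", strerror (err));

  /* Read the mask back to confirm.  */
  if (pthread_getaffinity_np (pthread_self (), sizeof set, &set) == 0)
    printf ("bound to %d CPU(s)\n", CPU_COUNT (&set));
  return 0;
}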
Example #5
int
__pthread_kill (
     pthread_t threadid,
     int signo)
{
  struct pthread *pd = (struct pthread *) threadid;

  /* Make sure the descriptor is valid.  */
  if (DEBUGGING_P && INVALID_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Force load of pd->tid into local variable or register.  Otherwise
     if a thread exits between ESRCH test and tgkill, we might return
     EINVAL, because pd->tid would be cleared by the kernel.  */
  pid_t tid = atomic_forced_read (pd->tid);
  if (__builtin_expect (tid <= 0, 0))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Disallow sending the signal we use for cancellation, timers,
     for the setxid implementation.  */
  if (signo == SIGCANCEL || signo == SIGTIMER || signo == SIGSETXID)
    return EINVAL;

  /* We have a special syscall to do the work.  */
  INTERNAL_SYSCALL_DECL (err);

  /* One comment: The PID field in the TCB can temporarily be changed
     (in fork).  But this must not affect this code here.  Since this
     function would have to be called while the thread is executing
     fork, it would have to happen in a signal handler.  But this is
     not allowed, pthread_kill is not guaranteed to be async-safe.  */
  int val;
#if defined(__ASSUME_TGKILL) && __ASSUME_TGKILL
  val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
			  tid, signo);
#else
# ifdef __NR_tgkill
  val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
			  tid, signo);
  if (INTERNAL_SYSCALL_ERROR_P (val, err)
      && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
# endif
    val = INTERNAL_SYSCALL (tkill, err, 2, tid, signo);
#endif

  return (INTERNAL_SYSCALL_ERROR_P (val, err)
	  ? INTERNAL_SYSCALL_ERRNO (val, err) : 0);
}
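
For readers unfamiliar with the tgkill fallback logic above, the syscall that the INTERNAL_SYSCALL macros eventually issue can be reproduced directly. A sketch only; it assumes SYS_tgkill and SYS_gettid are provided by the target kernel headers.

#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main (void)
{
  pid_t pid = getpid ();
  pid_t tid = syscall (SYS_gettid);         /* Kernel thread id, not a pthread_t.  */

  /* Equivalent of the pthread_kill path above: tgkill (tgid, tid, sig).
     Signal 0 performs only the existence/permission check; nothing is delivered.  */
  if (syscall (SYS_tgkill, pid, tid, 0) == -1)
    perror ("tgkill");
  else
    printf ("thread %d in process %d is alive\n", (int) tid, (int) pid);
  return 0;
}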
Example #6
int
attribute_protected
__pthread_getschedparam (
     pthread_t threadid,
     int *policy,
     struct sched_param *param)
{
  struct pthread *pd = (struct pthread *) threadid;

  /* Make sure the descriptor is valid.  */
  if (INVALID_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

  int result = 0;

  lll_lock (pd->lock, LLL_PRIVATE);

  /* The library is responsible for maintaining the values at all
     times.  If the user uses an interface other than
     pthread_setschedparam to modify the scheduler setting it is not
     the library's problem.  In case the descriptor's values have
     not yet been retrieved do it now.  */
  if ((pd->flags & ATTR_FLAG_SCHED_SET) == 0)
    {
      if (sched_getparam (pd->tid, &pd->schedparam) != 0)
	result = 1;
      else
	pd->flags |= ATTR_FLAG_SCHED_SET;
    }

  if ((pd->flags & ATTR_FLAG_POLICY_SET) == 0)
    {
      pd->schedpolicy = sched_getscheduler (pd->tid);
      if (pd->schedpolicy == -1)
	result = 1;
      else
	pd->flags |= ATTR_FLAG_POLICY_SET;
    }

  if (result == 0)
    {
      *policy = pd->schedpolicy;
      memcpy (param, &pd->schedparam, sizeof (struct sched_param));
    }

  lll_unlock (pd->lock, LLL_PRIVATE);

  return result;
}
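
A minimal sketch of the caller side (not part of the source above): query the current thread's policy and priority. Note that the implementation above reports failure as 1 rather than an errno value.

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

int main (void)
{
  int policy;
  struct sched_param param;

  int err = pthread_getschedparam (pthread_self (), &policy, &param);
  if (err != 0)
    {
      fprintf (stderr, "pthread_getschedparam failed: %d\n", err);
      return 1;
    }

  printf ("policy=%s priority=%d\n",
          policy == SCHED_FIFO ? "SCHED_FIFO"
          : policy == SCHED_RR ? "SCHED_RR" : "SCHED_OTHER",
          param.sched_priority);
  return 0;
}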
Example #7
int
pthread_setschedprio (
    pthread_t threadid,
    int prio)
{
    struct pthread *pd = (struct pthread *) threadid;

    /* Make sure the descriptor is valid.  */
    if (INVALID_TD_P (pd))
        /* Not a valid thread handle.  */
        return ESRCH;

    int result = 0;
    struct sched_param param;
    param.sched_priority = prio;

    lll_lock (pd->lock, LLL_PRIVATE);

    /* If the thread should have higher priority because of some
       PTHREAD_PRIO_PROTECT mutexes it holds, adjust the priority.  */
    if (__builtin_expect (pd->tpp != NULL, 0) && pd->tpp->priomax > prio)
        param.sched_priority = pd->tpp->priomax;

    /* Try to set the scheduler information.  */
    if (__builtin_expect (sched_setparam (pd->tid, &param) == -1, 0))
        result = errno;
    else
    {
        /* We succeeded changing the kernel information.  Reflect this
           change in the thread descriptor.  */
        param.sched_priority = prio;
        memcpy (&pd->schedparam, &param, sizeof (struct sched_param));
        pd->flags |= ATTR_FLAG_SCHED_SET;
    }

    lll_unlock (pd->lock, LLL_PRIVATE);

    return result;
}
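
A minimal caller sketch (assumption: the process is allowed to use a real-time policy, e.g. via CAP_SYS_NICE): the calling thread is switched to SCHED_FIFO first, then its priority alone is raised with pthread_setschedprio.

#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main (void)
{
  /* Switch to a real-time policy first; under SCHED_OTHER the only valid
     priority is 0, so pthread_setschedprio would reject other values.  */
  struct sched_param param = { .sched_priority = 10 };
  int err = pthread_setschedparam (pthread_self (), SCHED_FIFO, &param);
  if (err != 0)
    {
      fprintf (stderr, "pthread_setschedparam: %s\n", strerror (err));
      return 1;
    }

  /* Adjust only the priority, leaving the policy alone.  */
  err = pthread_setschedprio (pthread_self (), 20);
  if (err != 0)
    fprintf (stderr, "pthread_setschedprio: %s\n", strerror (err));
  return 0;
}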
Example #8
File: pthread_cancel.c  Project: kraj/glibc
int
pthread_cancel (pthread_t th)
{
  volatile struct pthread *pd = (volatile struct pthread *) th;

  /* Make sure the descriptor is valid.  */
  if (INVALID_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

#ifdef SHARED
  pthread_cancel_init ();
#endif
  int result = 0;
  int oldval;
  int newval;
  do
    {
    again:
      oldval = pd->cancelhandling;
      newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      /* Avoid doing unnecessary work.  The atomic operation can
	 potentially be expensive if the bus has to be locked and
	 remote cache lines have to be invalidated.  */
      if (oldval == newval)
	break;

      /* If the cancellation is handled asynchronously just send a
	 signal.  We avoid this if possible since it's more
	 expensive.  */
      if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
	{
	  /* Mark the cancellation as "in progress".  */
	  if (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling,
						    oldval | CANCELING_BITMASK,
						    oldval))
	    goto again;

#ifdef SIGCANCEL
	  /* The cancellation handler will take care of marking the
	     thread as canceled.  */
	  pid_t pid = getpid ();

	  INTERNAL_SYSCALL_DECL (err);
	  int val = INTERNAL_SYSCALL_CALL (tgkill, err, pid, pd->tid,
					   SIGCANCEL);
	  if (INTERNAL_SYSCALL_ERROR_P (val, err))
	    result = INTERNAL_SYSCALL_ERRNO (val, err);
#else
          /* It should be impossible to get here at all, since
             pthread_setcanceltype should never have allowed
             PTHREAD_CANCEL_ASYNCHRONOUS to be set.  */
          abort ();
#endif

	  break;
	}

	/* A single-threaded process should be able to kill itself, since
	   there is nothing in the POSIX specification that says that it
	   cannot.  So we set multiple_threads to true so that cancellation
	   points get executed.  */
	THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
	__pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif
    }
  /* Mark the thread as canceled.  This has to be done
     atomically since other bits could be modified as well.  */
  while (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling, newval,
					       oldval));

  return result;
}
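
A minimal usage sketch (not part of the glibc source): cancel a thread blocked at a cancellation point and confirm the result through the PTHREAD_CANCELED join value. The sleep-based loop is illustrative only.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *worker (void *arg)
{
  (void) arg;
  for (;;)
    sleep (1);                  /* sleep() is a cancellation point.  */
  return NULL;
}

int main (void)
{
  pthread_t t;
  pthread_create (&t, NULL, worker, NULL);

  int err = pthread_cancel (t);             /* Returns an errno value on failure.  */
  if (err != 0)
    fprintf (stderr, "pthread_cancel: %d\n", err);

  void *res;
  pthread_join (t, &res);
  printf ("worker %s\n", res == PTHREAD_CANCELED ? "was canceled" : "exited normally");
  return 0;
}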
Example #9
int
pthread_cancel (
    pthread_t th)
{
    volatile struct pthread *pd = (volatile struct pthread *) th;

    /* Make sure the descriptor is valid.  */
    if (INVALID_TD_P (pd))
        /* Not a valid thread handle.  */
        return ESRCH;

#ifdef SHARED
    pthread_cancel_init ();
#endif
    int result = 0;
    int oldval;
    int newval;
    do
    {
again:
        oldval = pd->cancelhandling;
        newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

        /* Avoid doing unnecessary work.  The atomic operation can
           potentially be expensive if the bus has to be locked and
           remote cache lines have to be invalidated.  */
        if (oldval == newval)
            break;

        /* If the cancellation is handled asynchronously just send a
           signal.  We avoid this if possible since it's more
           expensive.  */
        if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
        {
            /* Mark the cancellation as "in progress".  */
            if (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling,
                    oldval | CANCELING_BITMASK,
                    oldval))
                goto again;

            /* The cancellation handler will take care of marking the
               thread as canceled.  */
            INTERNAL_SYSCALL_DECL (err);

            /* One comment: The PID field in the TCB can temporarily be
               changed (in fork).  But this must not affect this code
               here.  Since this function would have to be called while
               the thread is executing fork, it would have to happen in
               a signal handler.  But this is not allowed, pthread_cancel
               is not guaranteed to be async-safe.  */
            int val;
#if defined(__ASSUME_TGKILL) && __ASSUME_TGKILL
            val = INTERNAL_SYSCALL (tgkill, err, 3,
                                    THREAD_GETMEM (THREAD_SELF, pid), pd->tid,
                                    SIGCANCEL);
#else
# ifdef __NR_tgkill
            val = INTERNAL_SYSCALL (tgkill, err, 3,
                                    THREAD_GETMEM (THREAD_SELF, pid), pd->tid,
                                    SIGCANCEL);
            if (INTERNAL_SYSCALL_ERROR_P (val, err)
                    && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
# endif
                val = INTERNAL_SYSCALL (tkill, err, 2, pd->tid, SIGCANCEL);
#endif

            if (INTERNAL_SYSCALL_ERROR_P (val, err))
                result = INTERNAL_SYSCALL_ERRNO (val, err);

            break;
        }
    }
    /* Mark the thread as canceled.  This has to be done
       atomically since other bits could be modified as well.  */
    while (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling, newval,
            oldval));

    return result;
}
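
The branch above that sends SIGCANCEL only runs when the target thread has enabled asynchronous cancellation. A minimal sketch of how a thread opts into that mode; the CPU-bound loop with no cancellation points is the usual motivation and is illustrative here.

#include <pthread.h>
#include <stdio.h>

static void *spin (void *arg)
{
  (void) arg;
  /* Allow cancellation to act immediately; without this, a loop that
     never reaches a cancellation point could not be canceled.  */
  int old;
  pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &old);
  for (;;)
    ;                           /* Pure computation, no cancellation points.  */
  return NULL;
}

int main (void)
{
  pthread_t t;
  pthread_create (&t, NULL, spin, NULL);
  pthread_cancel (t);                       /* Delivered via the SIGCANCEL path above.  */

  void *res;
  pthread_join (t, &res);
  printf ("%s\n", res == PTHREAD_CANCELED ? "canceled asynchronously" : "not canceled");
  return 0;
}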