Example 1
int __pthread_mutex_lock(pthread_mutex_t * mutex)
{
  pthread_descr self;

  switch(mutex->__m_kind) {
  case PTHREAD_MUTEX_ADAPTIVE_NP:
    __pthread_lock(&mutex->__m_lock, NULL);
    return 0;
  case PTHREAD_MUTEX_RECURSIVE_NP:
    self = thread_self();
    if (mutex->__m_owner == self) {
      mutex->__m_count++;
      return 0;
    }
    __pthread_lock(&mutex->__m_lock, self);
    mutex->__m_owner = self;
    mutex->__m_count = 0;
    return 0;
  case PTHREAD_MUTEX_ERRORCHECK_NP:
    self = thread_self();
    if (mutex->__m_owner == self) return EDEADLK;
    __pthread_alt_lock(&mutex->__m_lock, self);
    mutex->__m_owner = self;
    return 0;
  case PTHREAD_MUTEX_TIMED_NP:
    __pthread_alt_lock(&mutex->__m_lock, NULL);
    return 0;
  default:
    return EINVAL;
  }
}
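
A minimal caller-side sketch of the recursive case, using only the standard POSIX API (not part of the implementation above; it assumes PTHREAD_MUTEX_RECURSIVE maps onto PTHREAD_MUTEX_RECURSIVE_NP, and pthread_mutexattr_settype() may need _GNU_SOURCE on older toolchains):

#define _GNU_SOURCE           /* PTHREAD_MUTEX_RECURSIVE on older toolchains */
#include <pthread.h>

static pthread_mutex_t m;

static void lock_twice(void)
{
  pthread_mutexattr_t attr;

  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init(&m, &attr);
  pthread_mutexattr_destroy(&attr);

  pthread_mutex_lock(&m);     /* first lock: __m_owner = self, __m_count = 0 */
  pthread_mutex_lock(&m);     /* same owner: only __m_count is bumped */
  pthread_mutex_unlock(&m);
  pthread_mutex_unlock(&m);   /* count drops back; lock actually released */
  pthread_mutex_destroy(&m);
}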
Example 2
int
__pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
			      const struct timespec *abstime)
{
  pthread_descr self;
  pthread_extricate_if extr;

  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
    return EINVAL;

  self = thread_self ();

  /* Set up extrication interface */
  extr.pu_object = rwlock;
  extr.pu_extricate_func = rwlock_wr_extricate_func;

  /* Register extrication interface */
  __pthread_set_own_extricate_if (self, &extr);

  while(1)
    {
      __pthread_lock (&rwlock->__rw_lock, self);

      if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
	{
	  rwlock->__rw_writer = self;
	  __pthread_set_own_extricate_if (self, 0);
	  __pthread_unlock (&rwlock->__rw_lock);
	  return 0;
	}

      /* Suspend ourselves, then try again */
      enqueue (&rwlock->__rw_write_waiting, self);
      __pthread_unlock (&rwlock->__rw_lock);
      /* This is not a cancellation point */
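      /* timedsuspend() returns 0 on timeout and nonzero on a genuine restart */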
      if (timedsuspend (self, abstime) == 0)
	{
	  int was_on_queue;

	  __pthread_lock (&rwlock->__rw_lock, self);
	  was_on_queue = remove_from_queue (&rwlock->__rw_write_waiting, self);
	  __pthread_unlock (&rwlock->__rw_lock);

	  if (was_on_queue)
	    {
	      __pthread_set_own_extricate_if (self, 0);
	      return ETIMEDOUT;
	    }

	  /* Eat the outstanding restart() from the signaller */
	  suspend (self);
	}
    }
}
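
A hedged usage sketch: the deadline is absolute, so a relative timeout has to be added onto CLOCK_REALTIME first. It assumes pthread_rwlock_timedwrlock() is the exported alias of the function above:

#define _GNU_SOURCE           /* PTHREAD_RWLOCK_INITIALIZER on older libcs */
#include <errno.h>
#include <pthread.h>
#include <time.h>

static pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;

/* Try to become the writer, giving up `secs` seconds from now. */
static int write_locked_for(int secs)
{
  struct timespec abstime;
  int rc;

  clock_gettime(CLOCK_REALTIME, &abstime);   /* the deadline is absolute */
  abstime.tv_sec += secs;

  rc = pthread_rwlock_timedwrlock(&rw, &abstime);
  if (rc != 0)
    return rc;                /* ETIMEDOUT, or EINVAL for a bad abstime */

  /* ... write-side critical section ... */
  pthread_rwlock_unlock(&rw);
  return 0;
}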
Example 3
void __pthread_destroy_specifics(void)
{
  pthread_descr self = thread_self();
  int i, j, round, found_nonzero;
  destr_function destr;
  void * data;

  for (round = 0, found_nonzero = 1;
       found_nonzero && round < PTHREAD_DESTRUCTOR_ITERATIONS;
       round++) {
    found_nonzero = 0;
    for (i = 0; i < PTHREAD_KEY_1STLEVEL_SIZE; i++)
      if (THREAD_GETMEM_NC(self, p_specific[i]) != NULL)
        for (j = 0; j < PTHREAD_KEY_2NDLEVEL_SIZE; j++) {
          destr = pthread_keys[i * PTHREAD_KEY_2NDLEVEL_SIZE + j].destr;
          data = THREAD_GETMEM_NC(self, p_specific[i])[j];
          if (destr != NULL && data != NULL) {
            THREAD_GETMEM_NC(self, p_specific[i])[j] = NULL;
            destr(data);
            found_nonzero = 1;
          }
        }
  }
  __pthread_lock(THREAD_GETMEM(self, p_lock), self);
  for (i = 0; i < PTHREAD_KEY_1STLEVEL_SIZE; i++) {
    if (THREAD_GETMEM_NC(self, p_specific[i]) != NULL) {
      free(THREAD_GETMEM_NC(self, p_specific[i]));
      THREAD_SETMEM_NC(self, p_specific[i], NULL);
    }
  }
  __pthread_unlock(THREAD_GETMEM(self, p_lock));
}
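
A small, self-contained sketch of what drives this destructor machinery, using only the standard pthread_key_* API (link with -lpthread):

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t key;

static void destr(void *data)
{
  /* Runs once per non-NULL slot; the slot is cleared first, so storing
     a new value back here would trigger another round, up to
     PTHREAD_DESTRUCTOR_ITERATIONS in total. */
  free(data);
}

static void *worker(void *arg)
{
  (void) arg;
  pthread_setspecific(key, malloc(64));
  return NULL;                /* thread exit runs the destructor above */
}

int main(void)
{
  pthread_t t;

  pthread_key_create(&key, destr);
  pthread_create(&t, NULL, worker, NULL);
  pthread_join(t, NULL);
  pthread_key_delete(&key);
  return 0;
}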
Example 4
int
__pthread_clock_settime (clockid_t clock_id, hp_timing_t offset)
{
  pthread_descr self = thread_self ();
  pthread_t thread = ((unsigned int) clock_id) >> CLOCK_IDFIELD_SIZE;
  const unsigned int mask = ~0U >> CLOCK_IDFIELD_SIZE;

  if (thread == 0 || (THREAD_GETMEM (self, p_tid) & mask) == thread)
    /* Our own clock.  */
    THREAD_SETMEM (self, p_cpuclock_offset, offset);
  else
    {
      pthread_descr th;
      pthread_handle handle = thread_handle (thread);
      __pthread_lock (&handle->h_lock, NULL);
      th = handle->h_descr;
      if (th == NULL || (th->p_tid & mask) != thread || th->p_terminated)
	{
	  __pthread_unlock (&handle->h_lock);
	  __set_errno (EINVAL);
	  return -1;
	}
      th->p_cpuclock_offset = offset;
      __pthread_unlock (&handle->h_lock);
   }

  return 0;
}
Example 5
int pthread_cancel(pthread_t thread)
{
  pthread_handle handle = thread_handle(thread);
  int pid;
  int dorestart = 0;
  pthread_descr th;
  pthread_extricate_if *pextricate;
  int already_canceled;

  __pthread_lock(&handle->h_lock, NULL);
  if (invalid_handle(handle, thread)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }

  th = handle->h_descr;

  already_canceled = th->p_canceled;
  th->p_canceled = 1;

  if (th->p_cancelstate == PTHREAD_CANCEL_DISABLE || already_canceled) {
    __pthread_unlock(&handle->h_lock);
    return 0;
  }

  pextricate = th->p_extricate;
  pid = th->p_pid;

  /* If the thread has registered an extrication interface, then
     invoke the interface. If it returns 1, then we succeeded in
     dequeuing the thread from whatever waiting object it was enqueued
     with. In that case, it is our responsibility to wake it up.
     And also to set the p_woken_by_cancel flag so the woken thread
     can tell that it was woken by cancellation. */

  if (pextricate != NULL) {
    dorestart = pextricate->pu_extricate_func(pextricate->pu_object, th);
    th->p_woken_by_cancel = dorestart;
  }

  __pthread_unlock(&handle->h_lock);

  /* If the thread has suspended or is about to, then we unblock it by
     issuing a restart, instead of a cancel signal. Otherwise we send
     the cancel signal to unblock the thread from a cancellation point,
     or to initiate asynchronous cancellation. The restart is needed so
     we have proper accounting of restarts; suspend decrements the thread's
     resume count, and restart() increments it.  This also means that suspend's
     handling of the cancel signal is obsolete. */

  if (dorestart)
    restart(th);
  else
    kill(pid, __pthread_sig_cancel);

  return 0;
}
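
A usage sketch of the extrication path: a thread blocked in sem_wait() (a cancellation point) is dequeued by the extricate function and woken via restart(). Standard POSIX API only; the behavior follows the comments above:

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t s;

static void *victim(void *arg)
{
  (void) arg;
  sem_wait(&s);               /* blocks; sem_wait registers an extricate hook */
  return NULL;
}

int main(void)
{
  pthread_t t;
  void *res;

  sem_init(&s, 0, 0);
  pthread_create(&t, NULL, victim, NULL);
  pthread_cancel(t);          /* extricate func dequeues t, restart() wakes it */
  pthread_join(t, &res);
  printf("%s\n", res == PTHREAD_CANCELED ? "canceled" : "returned");
  return 0;
}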
Example 6
int __pthread_mutex_timedlock (pthread_mutex_t *mutex,
			       const struct timespec *abstime)
{
  pthread_descr self;
  int res;

  if (__builtin_expect (abstime->tv_nsec < 0
			|| abstime->tv_nsec >= 1000000000, 0))
    return EINVAL;

  switch(mutex->__m_kind) {
  case PTHREAD_MUTEX_ADAPTIVE_NP:
    __pthread_lock(&mutex->__m_lock, NULL);
    return 0;
  case PTHREAD_MUTEX_RECURSIVE_NP:
    self = thread_self();
    if (mutex->__m_owner == self) {
      mutex->__m_count++;
      return 0;
    }
    __pthread_lock(&mutex->__m_lock, self);
    mutex->__m_owner = self;
    mutex->__m_count = 0;
    return 0;
  case PTHREAD_MUTEX_ERRORCHECK_NP:
    self = thread_self();
    if (mutex->__m_owner == self) return EDEADLK;
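    /* __pthread_alt_timedlock() returns nonzero on success, 0 on timeout. */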
    res = __pthread_alt_timedlock(&mutex->__m_lock, self, abstime);
    if (res != 0)
      {
	mutex->__m_owner = self;
	return 0;
      }
    return ETIMEDOUT;
  case PTHREAD_MUTEX_TIMED_NP:
    /* Only this type supports a timed lock. */
    return (__pthread_alt_timedlock(&mutex->__m_lock, NULL, abstime)
	    ? 0 : ETIMEDOUT);
  default:
    return EINVAL;
  }
}
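
A caller-side sketch (standard POSIX API; assumes pthread_mutex_timedlock() is the exported alias of the function above). Note that adaptive and recursive mutexes never time out on this path; they fall back to plain locking:

#include <errno.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

/* Acquire m, or give up `secs` seconds from now. */
static int lock_with_deadline(int secs)
{
  struct timespec abstime;
  int rc;

  clock_gettime(CLOCK_REALTIME, &abstime);
  abstime.tv_sec += secs;

  rc = pthread_mutex_timedlock(&m, &abstime);
  if (rc == 0)
    pthread_mutex_unlock(&m);
  return rc;                  /* 0, ETIMEDOUT, or EINVAL */
}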
Example 7
static void pthread_free(pthread_descr th)
{
  pthread_handle handle;
  pthread_readlock_info *iter, *next;
#ifndef __ARCH_USE_MMU__
  char *h_bottom_save;
#endif

  /* Make the handle invalid */
  handle = thread_handle(th->p_tid);
  __pthread_lock(&handle->h_lock, NULL);
#ifndef __ARCH_USE_MMU__
  h_bottom_save = handle->h_bottom;
#endif
  handle->h_descr = NULL;
  handle->h_bottom = (char *)(-1L);
  __pthread_unlock(&handle->h_lock);
#ifdef FREE_THREAD_SELF
  FREE_THREAD_SELF(th, th->p_nr);
#endif
  /* One fewer threads in __pthread_handles */
  __pthread_handles_num--;

  /* Destroy read lock list, and list of free read lock structures.
     If the former is not empty, it means the thread exited while
     holding read locks! */

  for (iter = th->p_readlock_list; iter != NULL; iter = next)
    {
      next = iter->pr_next;
      free(iter);
    }

  for (iter = th->p_readlock_free; iter != NULL; iter = next)
    {
      next = iter->pr_next;
      free(iter);
    }

  /* If initial thread, nothing to free */
  if (th == &__pthread_initial_thread) return;
  if (!th->p_userstack)
    {
#ifdef __ARCH_USE_MMU__
      /* Free the stack and thread descriptor area */
      if (th->p_guardsize != 0)
	munmap(th->p_guardaddr, th->p_guardsize);
      munmap((caddr_t) ((char *)(th+1) - STACK_SIZE), STACK_SIZE);
#else
      /* For non-MMU systems we always malloc the stack, so free it here. -StS */
      free(h_bottom_save);
#endif /* __ARCH_USE_MMU__ */
    }
}
Example 8
static int rwlock_wr_extricate_func(void *obj, pthread_descr th)
{
  pthread_rwlock_t *rwlock = obj;
  int did_remove = 0;

  __pthread_lock(&rwlock->__rw_lock, NULL);
  did_remove = remove_from_queue(&rwlock->__rw_write_waiting, th);
  __pthread_unlock(&rwlock->__rw_lock);

  return did_remove;
}
Example 9
static int new_sem_extricate_func(void *obj, pthread_descr th)
{
  __volatile__ pthread_descr self = thread_self();
  sem_t *sem = obj;
  int did_remove = 0;

  __pthread_lock(&sem->__sem_lock, self);
  did_remove = remove_from_queue(&sem->__sem_waiting, th);
  __pthread_unlock(&sem->__sem_lock);

  return did_remove;
}
Example 10
int attribute_noreturn __pthread_manager_event(void *arg)
{
  /* If we have special thread_self processing, initialize it.  */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(&__pthread_manager_thread, 1);
#endif

  /* Get the lock the manager will free once all is correctly set up.  */
  __pthread_lock (THREAD_GETMEM((&__pthread_manager_thread), p_lock), NULL);
  /* Free it immediately.  */
  __pthread_unlock (THREAD_GETMEM((&__pthread_manager_thread), p_lock));

  __pthread_manager(arg);
}
Example 11
static void pthread_exited(pid_t pid)
{
  pthread_descr th;
  int detached;
  /* Find thread with that pid */
  for (th = __pthread_main_thread->p_nextlive;
       th != __pthread_main_thread;
       th = th->p_nextlive) {
    if (th->p_pid == pid) {
      /* Remove thread from list of active threads */
      th->p_nextlive->p_prevlive = th->p_prevlive;
      th->p_prevlive->p_nextlive = th->p_nextlive;
      /* Mark thread as exited, and if detached, free its resources */
      __pthread_lock(th->p_lock, NULL);
      th->p_exited = 1;
      /* If we have to signal this event do it now.  */
      if (th->p_report_events)
	{
	  /* See whether TD_REAP is set in any of the masks.  */
	  int idx = __td_eventword (TD_REAP);
	  uint32_t mask = __td_eventmask (TD_REAP);

	  if ((mask & (__pthread_threads_events.event_bits[idx]
		       | th->p_eventbuf.eventmask.event_bits[idx])) != 0)
	    {
	      /* Yep, we have to signal the reapage.  */
	      th->p_eventbuf.eventnum = TD_REAP;
	      th->p_eventbuf.eventdata = th;
	      __pthread_last_event = th;

	      /* Now call the function to signal the event.  */
	      __linuxthreads_reap_event();
	    }
	}
      detached = th->p_detached;
      __pthread_unlock(th->p_lock);
      if (detached)
	pthread_free(th);
      break;
    }
  }
  /* If all threads have exited and the main thread is pending on a
     pthread_exit, wake up the main thread and terminate ourselves. */
  if (main_thread_exiting &&
      __pthread_main_thread->p_nextlive == __pthread_main_thread) {
    restart(__pthread_main_thread);
    /* Same logic as REQ_MAIN_THREAD_EXIT. */
  }
}
Example 12
static int join_extricate_func(void *obj, pthread_descr th attribute_unused)
{
  volatile pthread_descr self = thread_self();
  pthread_handle handle = obj;
  pthread_descr jo;
  int did_remove = 0;

  __pthread_lock(&handle->h_lock, self);
  jo = handle->h_descr;
  did_remove = jo->p_joining != NULL;
  jo->p_joining = NULL;
  __pthread_unlock(&handle->h_lock);

  return did_remove;
}
Example 13
int
__pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
  int result = EBUSY;

  __pthread_lock (&rwlock->__rw_lock, NULL);
  if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
    {
      rwlock->__rw_writer = thread_self ();
      result = 0;
    }
  __pthread_unlock (&rwlock->__rw_lock);

  return result;
}
Example 14
int sem_trywait(sem_t * sem)
{
  int retval;

  __pthread_lock(&sem->__sem_lock, NULL);
  if (sem->__sem_value == 0) {
    __set_errno(EAGAIN);
    retval = -1;
  } else {
    sem->__sem_value--;
    retval = 0;
  }
  __pthread_unlock(&sem->__sem_lock);
  return retval;
}
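
A non-blocking polling wrapper; a minimal sketch assuming the semaphore was initialized elsewhere with sem_init():

#include <errno.h>
#include <semaphore.h>

/* Returns 1 if the count was decremented, 0 if it was zero, -1 on error. */
static int try_take(sem_t *sem)
{
  if (sem_trywait(sem) == -1)
    return errno == EAGAIN ? 0 : -1;  /* EAGAIN: nothing taken */
  return 1;
}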
Example 15
int
__pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
  int readers;
  _pthread_descr writer;

  __pthread_lock (&rwlock->__rw_lock, NULL);
  readers = rwlock->__rw_readers;
  writer = rwlock->__rw_writer;
  __pthread_unlock (&rwlock->__rw_lock);

  if (readers > 0 || writer != NULL)
    return EBUSY;

  return 0;
}
Example 16
int pthread_kill(pthread_t thread, int signo)
{
  pthread_handle handle = thread_handle(thread);
  int pid;

  __pthread_lock(&handle->h_lock, NULL);
  if (invalid_handle(handle, thread)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }
  pid = handle->h_descr->p_pid;
  __pthread_unlock(&handle->h_lock);
  if (kill(pid, signo) == -1)
    return errno;
  else
    return 0;
}
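
One common use of this entry point is a liveness probe with signal 0: only the handle lookup and kill()'s existence/permission checks run, and no signal is delivered. A sketch:

#include <errno.h>
#include <pthread.h>
#include <signal.h>

/* Returns 1 if the thread handle is still valid, 0 if stale, -1 on error. */
static int thread_alive(pthread_t t)
{
  int rc = pthread_kill(t, 0);

  if (rc == 0)
    return 1;
  return rc == ESRCH ? 0 : -1;
}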
Example 17
int sem_post(sem_t * sem)
{
  pthread_descr self = thread_self();
  pthread_descr th;
#if 0 // AW11
  struct pthread_request request;
  if (THREAD_GETMEM(self, p_in_sighandler) == NULL)
#endif
  {
    __pthread_lock(&sem->__sem_lock, self);
    if (sem->__sem_waiting == NULL) {
      if (sem->__sem_value >= SEM_VALUE_MAX) {
        /* Overflow */
        __set_errno(ERANGE);
        __pthread_unlock(&sem->__sem_lock);
        return -1;
      }
      sem->__sem_value++;
      __pthread_unlock(&sem->__sem_lock);
    } else {
      th = dequeue(&sem->__sem_waiting);
      __pthread_unlock(&sem->__sem_lock);
      th->p_sem_avail = 1;
      WRITE_MEMORY_BARRIER();
      restart(th);
    }
  }
#if 0 // AW11
  else {
    /* If we're in signal handler, delegate post operation to
       the thread manager. */
    if (__pthread_manager_request < 0) {
      if (__pthread_initialize_manager() < 0) {
        __set_errno(EAGAIN);
        return -1;
      }
    }
    request.req_kind = REQ_POST;
    request.req_args.post = sem;
    TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
					(char *) &request, sizeof(request)));
  }
#endif
  return 0;
}
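
A producer/consumer sketch showing both sides of the wait queue handled above (assumes sem_init(&items, 0, 0) ran during setup):

#include <pthread.h>
#include <semaphore.h>

static sem_t items;           /* initialized once: sem_init(&items, 0, 0) */

static void producer_step(void)
{
  /* ... publish one unit of work ... */
  sem_post(&items);           /* wakes one thread off __sem_waiting,
                                 or just bumps __sem_value */
}

static void consumer_step(void)
{
  sem_wait(&items);           /* suspends until a post restarts us */
  /* ... consume one unit of work ... */
}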
Example 18
static void pthread_key_delete_helper(void *arg, pthread_descr th)
{
  struct pthread_key_delete_helper_args *args = arg;
  unsigned int idx1st = args->idx1st;
  unsigned int idx2nd = args->idx2nd;
  pthread_descr self = args->self;

  if (self == 0)
    self = args->self = thread_self();

  if (!th->p_terminated) {
    /* pthread_exit() may try to free th->p_specific[idx1st] concurrently. */
    __pthread_lock(THREAD_GETMEM(th, p_lock), self);
    if (th->p_specific[idx1st] != NULL)
      th->p_specific[idx1st][idx2nd] = NULL;
    __pthread_unlock(THREAD_GETMEM(th, p_lock));
  }
}
Example 19
static int
pthread_start_thread_event(void *arg)
{
  pthread_descr self = (pthread_descr) arg;

#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, self->p_nr);
#endif
  /* Make sure our pid field is initialized, just in case we get there
     before our father has initialized it. */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Get the lock the manager will free once all is correctly set up.  */
  __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
  /* Free it immediately.  */
  __pthread_unlock (THREAD_GETMEM(self, p_lock));

  /* Continue with the real function.  */
  pthread_start_thread (arg);
}
Example 20
int pthread_getschedparam(pthread_t thread, int *policy,
                          struct sched_param *param)
{
  pthread_handle handle = thread_handle(thread);
  int pid, pol;

  __pthread_lock(&handle->h_lock, NULL);
  if (invalid_handle(handle, thread)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }
  pid = handle->h_descr->p_pid;
  __pthread_unlock(&handle->h_lock);
  pol = sched_getscheduler(pid);
  if (pol == -1) return errno;
  if (sched_getparam(pid, param) == -1) return errno;
  *policy = pol;
  return 0;
}
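
A small caller sketch printing what this returns (standard POSIX API):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void show_sched(pthread_t t)
{
  struct sched_param sp;
  int policy;

  if (pthread_getschedparam(t, &policy, &sp) != 0)
    return;
  printf("policy=%s priority=%d\n",
         policy == SCHED_FIFO ? "SCHED_FIFO" :
         policy == SCHED_RR   ? "SCHED_RR"   : "SCHED_OTHER",
         sp.sched_priority);
}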
Example 21
int
__pthread_clock_gettime (clockid_t clock_id, hp_timing_t freq,
			 struct timespec *tp)
{
  hp_timing_t tsc, cpuclock_offset;
  pthread_descr self = thread_self ();
  pthread_t thread = ((unsigned int) clock_id) >> CLOCK_IDFIELD_SIZE;
  const unsigned int mask = ~0U >> CLOCK_IDFIELD_SIZE;

  if (thread == 0 || (THREAD_GETMEM (self, p_tid) & mask) == thread)
    cpuclock_offset = THREAD_GETMEM (self, p_cpuclock_offset);
  else
    {
      pthread_descr th;
      pthread_handle handle = thread_handle (thread);
      __pthread_lock (&handle->h_lock, NULL);
      th = handle->h_descr;
      if (th == NULL || (th->p_tid & mask) != thread || th->p_terminated)
	{
	  __pthread_unlock (&handle->h_lock);
	  __set_errno (EINVAL);
	  return -1;
	}
      cpuclock_offset = th->p_cpuclock_offset;
      __pthread_unlock (&handle->h_lock);
   }

  /* Get the current counter.  */
  HP_TIMING_NOW (tsc);

  /* Compute the offset since the start time of the process.  */
  tsc -= cpuclock_offset;

  /* Compute the seconds.  */
  tp->tv_sec = tsc / freq;

  /* And the nanoseconds.  This computation should be stable until
     we get machines with about 16GHz frequency.  */
  tp->tv_nsec = ((tsc % freq) * 1000000000ull) / freq;

  return 0;
}
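
For completeness: a caller never builds these clock ids by hand. pthread_getcpuclockid() returns one with the thread id already packed above the CLOCK_IDFIELD_SIZE low bits, which is exactly what the function above recovers by shifting. A sketch, assuming pthread_getcpuclockid() is available in this build:

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static void print_cpu_time(pthread_t th)
{
  clockid_t cid;
  struct timespec ts;

  if (pthread_getcpuclockid(th, &cid) == 0 && clock_gettime(cid, &ts) == 0)
    printf("cpu time: %ld.%09ld s\n", (long) ts.tv_sec, ts.tv_nsec);
}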
Example 22
int
__pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
  pthread_descr self = thread_self ();

  while(1)
    {
      __pthread_lock (&rwlock->__rw_lock, self);
      if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
	{
	  rwlock->__rw_writer = self;
	  __pthread_unlock (&rwlock->__rw_lock);
	  return 0;
	}

      /* Suspend ourselves, then try again */
      enqueue (&rwlock->__rw_write_waiting, self);
      __pthread_unlock (&rwlock->__rw_lock);
      suspend (self); /* This is not a cancellation point */
    }
}
Example 23
int
__pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
  pthread_descr self = thread_self();
  pthread_readlock_info *existing;
  int out_of_mem, have_lock_already;
  int retval = EBUSY;

  have_lock_already = rwlock_have_already(&self, rwlock,
      &existing, &out_of_mem);

  __pthread_lock (&rwlock->__rw_lock, self);

  /* Zero is passed here instead of have_lock_already in order to meet
     the Single Unix Specification: if writers are waiting,
     pthread_rwlock_tryrdlock must not acquire a read lock, even if the
     caller already holds one or more read locks. */

  if (rwlock_can_rdlock(rwlock, 0))
    {
      ++rwlock->__rw_readers;
      retval = 0;
    }

  __pthread_unlock (&rwlock->__rw_lock);

  if (retval == 0)
    {
      if (have_lock_already || out_of_mem)
	{
	  if (existing != NULL)
	    ++existing->pr_lock_count;
	  else
	    ++self->p_untracked_readlock_count;
	}
    }

  return retval;
}
Example 24
int pthread_setschedparam(pthread_t thread, int policy,
                          const struct sched_param *param)
{
  pthread_handle handle = thread_handle(thread);
  pthread_descr th;

  __pthread_lock(&handle->h_lock, NULL);
  if (invalid_handle(handle, thread)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }
  th = handle->h_descr;
  if (sched_setscheduler(th->p_pid, policy, param) == -1) {
    __pthread_unlock(&handle->h_lock);
    return errno;
  }
  th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
  __pthread_unlock(&handle->h_lock);
  if (__pthread_manager_request >= 0)
    __pthread_manager_adjust_prio(th->p_priority);
  return 0;
}
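
A caller-side sketch; since the implementation forwards to sched_setscheduler() on the thread's kernel process, switching to SCHED_FIFO typically needs root privileges:

#include <pthread.h>
#include <sched.h>

static int make_fifo(pthread_t t, int prio)
{
  struct sched_param sp;

  sp.sched_priority = prio;
  return pthread_setschedparam(t, SCHED_FIFO, &sp);  /* 0, EPERM, ESRCH... */
}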
Example 25
int
__pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
  pthread_descr self = NULL;
  pthread_readlock_info *existing;
  int out_of_mem, have_lock_already;

  have_lock_already = rwlock_have_already(&self, rwlock,
					  &existing, &out_of_mem);

  if (self == NULL)
    self = thread_self ();

  for (;;)
    {
      __pthread_lock (&rwlock->__rw_lock, self);

      if (rwlock_can_rdlock(rwlock, have_lock_already))
	break;

      enqueue (&rwlock->__rw_read_waiting, self);
      __pthread_unlock (&rwlock->__rw_lock);
      suspend (self); /* This is not a cancellation point */
    }

  ++rwlock->__rw_readers;
  __pthread_unlock (&rwlock->__rw_lock);

  if (have_lock_already || out_of_mem)
    {
      if (existing != NULL)
	++existing->pr_lock_count;
      else
	++self->p_untracked_readlock_count;
    }

  return 0;
}
Example 26
int pthread_detach(pthread_t thread_id)
{
  int terminated;
  struct pthread_request request;
  pthread_handle handle = thread_handle(thread_id);
  pthread_descr th;

  __pthread_lock(&handle->h_lock, NULL);
  if (invalid_handle(handle, thread_id)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }
  th = handle->h_descr;
  /* If already detached, error */
  if (th->p_detached) {
    __pthread_unlock(&handle->h_lock);
    return EINVAL;
  }
  /* If already joining, don't do anything. */
  if (th->p_joining != NULL) {
    __pthread_unlock(&handle->h_lock);
    return 0;
  }
  /* Mark as detached */
  th->p_detached = 1;
  terminated = th->p_terminated;
  __pthread_unlock(&handle->h_lock);
  /* If already terminated, notify thread manager to reclaim resources */
  if (terminated && __pthread_manager_request >= 0) {
    request.req_thread = thread_self();
    request.req_kind = REQ_FREE;
    request.req_args.free.thread_id = thread_id;
    TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
		(char *) &request, sizeof(request)));
  }
  return 0;
}
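
A fire-and-forget sketch built on this entry point (standard POSIX API). Once detached, the manager reclaims the thread's resources (via REQ_FREE) when it terminates, and the thread must not be joined:

#include <pthread.h>

static void *task(void *arg)
{
  (void) arg;
  /* ... background work ... */
  return NULL;
}

static int spawn_detached(void)
{
  pthread_t t;
  int rc = pthread_create(&t, NULL, task, NULL);

  if (rc == 0)
    rc = pthread_detach(t);
  return rc;
}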
Example 27
static void pthread_handle_free(pthread_t th_id)
{
  pthread_handle handle = thread_handle(th_id);
  pthread_descr th;

  __pthread_lock(&handle->h_lock, NULL);
  if (invalid_handle(handle, th_id)) {
    /* pthread_reap_children has deallocated the thread already,
       nothing needs to be done */
    __pthread_unlock(&handle->h_lock);
    return;
  }
  th = handle->h_descr;
  if (th->p_exited) {
    __pthread_unlock(&handle->h_lock);
    pthread_free(th);
  } else {
    /* The Unix process of the thread is still running.
       Mark the thread as detached so that the thread manager will
       deallocate its resources when the Unix process exits. */
    th->p_detached = 1;
    __pthread_unlock(&handle->h_lock);
  }
}
Example 28
int sem_wait(sem_t * sem)
{
  __volatile__ pthread_descr self = thread_self();
  pthread_extricate_if extr;
  int already_canceled = 0;
  int spurious_wakeup_count;

  /* Set up extrication interface */
  extr.pu_object = sem;
  extr.pu_extricate_func = new_sem_extricate_func;

  __pthread_lock(&sem->__sem_lock, self);
  if (sem->__sem_value > 0) {
    sem->__sem_value--;
    __pthread_unlock(&sem->__sem_lock);
    return 0;
  }
  /* Register extrication interface */
  THREAD_SETMEM(self, p_sem_avail, 0);
  __pthread_set_own_extricate_if(self, &extr);
  /* Enqueue only if not already cancelled. */
  if (!(THREAD_GETMEM(self, p_canceled)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
    enqueue(&sem->__sem_waiting, self);
  else
    already_canceled = 1;
  __pthread_unlock(&sem->__sem_lock);

  if (already_canceled) {
    __pthread_set_own_extricate_if(self, 0);
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  }

  /* Wait for sem_post or cancellation, or fall through if already canceled */
  spurious_wakeup_count = 0;
  while (1)
    {
      suspend(self);
      if (THREAD_GETMEM(self, p_sem_avail) == 0
	  && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
	      || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
	{
	  /* Count resumes that don't belong to us. */
	  spurious_wakeup_count++;
	  continue;
	}
      break;
    }
  __pthread_set_own_extricate_if(self, 0);

  /* Terminate only if the wakeup came from cancellation. */
  /* Otherwise ignore cancellation because we got the semaphore. */

  if (THREAD_GETMEM(self, p_woken_by_cancel)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
    THREAD_SETMEM(self, p_woken_by_cancel, 0);
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  }
  /* We got the semaphore */
  return 0;
}
Example 29
int sem_timedwait(sem_t *sem, const struct timespec *abstime)
{
  pthread_descr self = thread_self();
  pthread_extricate_if extr;
  int already_canceled = 0;
  int spurious_wakeup_count;

  __pthread_lock(&sem->__sem_lock, self);
  if (sem->__sem_value > 0) {
    --sem->__sem_value;
    __pthread_unlock(&sem->__sem_lock);
    return 0;
  }

  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) {
    /* The standard requires that if the function would block and the
       time value is illegal, the function returns with an error.  */
    __pthread_unlock(&sem->__sem_lock);
    __set_errno (EINVAL);
    return -1;
  }

  /* Set up extrication interface */
  extr.pu_object = sem;
  extr.pu_extricate_func = new_sem_extricate_func;

  /* Register extrication interface */
  THREAD_SETMEM(self, p_sem_avail, 0);
  __pthread_set_own_extricate_if(self, &extr);
  /* Enqueue only if not already cancelled. */
  if (!(THREAD_GETMEM(self, p_canceled)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
    enqueue(&sem->__sem_waiting, self);
  else
    already_canceled = 1;
  __pthread_unlock(&sem->__sem_lock);

  if (already_canceled) {
    __pthread_set_own_extricate_if(self, 0);
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  }

  spurious_wakeup_count = 0;
  while (1)
    {
      if (timedsuspend(self, abstime) == 0) {
	int was_on_queue;

	/* __pthread_lock will queue back any spurious restarts that
	   may happen to it. */

	__pthread_lock(&sem->__sem_lock, self);
	was_on_queue = remove_from_queue(&sem->__sem_waiting, self);
	__pthread_unlock(&sem->__sem_lock);

	if (was_on_queue) {
	  __pthread_set_own_extricate_if(self, 0);
	  __set_errno (ETIMEDOUT);
	  return -1;
	}

	/* Eat the outstanding restart() from the signaller */
	suspend(self);
      }

      if (THREAD_GETMEM(self, p_sem_avail) == 0
	  && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
	      || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
	{
	  /* Count resumes that don't belong to us. */
	  spurious_wakeup_count++;
	  continue;
	}
      break;
    }

  __pthread_set_own_extricate_if(self, 0);

  /* Terminate only if the wakeup came from cancellation. */
  /* Otherwise ignore cancellation because we got the semaphore. */

  if (THREAD_GETMEM(self, p_woken_by_cancel)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
    THREAD_SETMEM(self, p_woken_by_cancel, 0);
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  }
  /* We got the semaphore */
  return 0;
}
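
A wrapper sketch converting a relative timeout into the absolute CLOCK_REALTIME deadline this function expects; the EINTR retry is included because sem_timedwait() may be interrupted by signals:

#include <errno.h>
#include <semaphore.h>
#include <time.h>

static int wait_ms(sem_t *sem, long ms)
{
  struct timespec ts;

  clock_gettime(CLOCK_REALTIME, &ts);
  ts.tv_sec  += ms / 1000;
  ts.tv_nsec += (ms % 1000) * 1000000L;
  if (ts.tv_nsec >= 1000000000L) {
    ts.tv_sec++;
    ts.tv_nsec -= 1000000000L;
  }

  while (sem_timedwait(sem, &ts) == -1) {
    if (errno == EINTR)
      continue;               /* interrupted by a signal: retry */
    return errno;             /* ETIMEDOUT or a real error */
  }
  return 0;
}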
Example 30
static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
				 void * (*start_routine)(void *), void *arg,
				 sigset_t * mask, int father_pid,
				 int report_events,
				 td_thr_events_t *event_maskp)
{
  size_t sseg;
  int pid;
  pthread_descr new_thread;
  char * new_thread_bottom;
  char * new_thread_top;
  pthread_t new_thread_id;
  char *guardaddr = NULL;
  size_t guardsize = 0;
  int pagesize = getpagesize();
  int saved_errno = 0;

  /* First check whether we have to change the policy and if yes, whether
     we can do this.  Normally this should be done by examining the
     return value of the sched_setscheduler call in pthread_start_thread
     but this is hard to implement.  FIXME  */
  if (attr != NULL && attr->__schedpolicy != SCHED_OTHER && geteuid () != 0)
    return EPERM;
  /* Find a free segment for the thread, and allocate a stack if needed */
  for (sseg = 2; ; sseg++)
    {
      if (sseg >= PTHREAD_THREADS_MAX)
	return EAGAIN;
      if (__pthread_handles[sseg].h_descr != NULL)
	continue;
      if (pthread_allocate_stack(attr, thread_segment(sseg), pagesize,
                                 &new_thread, &new_thread_bottom,
                                 &guardaddr, &guardsize) == 0)
        break;
#ifndef __ARCH_USE_MMU__
      else
        /* With an MMU, mmap() is used to allocate the stack, so if one
         * segment is already mapped we should continue and try the next
         * one.  Without an MMU, malloc() is used instead, and it is a
         * waste of CPU cycles to keep trying once it has failed.  */
        return EAGAIN;
#endif
    }
  __pthread_handles_num++;
  /* Allocate new thread identifier */
  pthread_threads_counter += PTHREAD_THREADS_MAX;
  new_thread_id = sseg + pthread_threads_counter;
  /* Initialize the thread descriptor.  Elements which have to be
     initialized to zero already have this value.  */
  new_thread->p_tid = new_thread_id;
  new_thread->p_lock = &(__pthread_handles[sseg].h_lock);
  new_thread->p_cancelstate = PTHREAD_CANCEL_ENABLE;
  new_thread->p_canceltype = PTHREAD_CANCEL_DEFERRED;
  new_thread->p_errnop = &new_thread->p_errno;
  new_thread->p_h_errnop = &new_thread->p_h_errno;
#ifdef __UCLIBC_HAS_XLOCALE__
  /* Initialize thread's locale to the global locale. */
  new_thread->locale = __global_locale;
#endif /* __UCLIBC_HAS_XLOCALE__ */
  new_thread->p_guardaddr = guardaddr;
  new_thread->p_guardsize = guardsize;
  new_thread->p_self = new_thread;
  new_thread->p_nr = sseg;
  /* Initialize the thread handle */
  __pthread_init_lock(&__pthread_handles[sseg].h_lock);
  __pthread_handles[sseg].h_descr = new_thread;
  __pthread_handles[sseg].h_bottom = new_thread_bottom;
  /* Determine scheduling parameters for the thread */
  new_thread->p_start_args.schedpolicy = -1;
  if (attr != NULL) {
    new_thread->p_detached = attr->__detachstate;
    new_thread->p_userstack = attr->__stackaddr_set;

    switch(attr->__inheritsched) {
    case PTHREAD_EXPLICIT_SCHED:
      new_thread->p_start_args.schedpolicy = attr->__schedpolicy;
      memcpy (&new_thread->p_start_args.schedparam, &attr->__schedparam,
	      sizeof (struct sched_param));
      break;
    case PTHREAD_INHERIT_SCHED:
      new_thread->p_start_args.schedpolicy = sched_getscheduler(father_pid);
      sched_getparam(father_pid, &new_thread->p_start_args.schedparam);
      break;
    }
    new_thread->p_priority =
      new_thread->p_start_args.schedparam.sched_priority;
  }
  /* Finish setting up arguments to pthread_start_thread */
  new_thread->p_start_args.start_routine = start_routine;
  new_thread->p_start_args.arg = arg;
  new_thread->p_start_args.mask = *mask;
  /* Raise priority of thread manager if needed */
  __pthread_manager_adjust_prio(new_thread->p_priority);
  /* Do the cloning.  We have to use two different functions depending
     on whether we are debugging or not.  */
  pid = 0;     /* Note that the thread can never have PID zero.  */
  new_thread_top = ((char *)new_thread - THREAD_STACK_OFFSET);

  /* ******************************************************** */
  /*  This code was moved from below to cope with running threads
   *  on uClinux systems.  See comment below...
   * Insert new thread in doubly linked list of active threads */
  new_thread->p_prevlive = __pthread_main_thread;
  new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
  __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
  __pthread_main_thread->p_nextlive = new_thread;
  /* ********************************************************* */

  if (report_events)
    {
      /* See whether the TD_CREATE event bit is set in any of the
         masks.  */
      int idx = __td_eventword (TD_CREATE);
      uint32_t m = __td_eventmask (TD_CREATE);

      if ((m & (__pthread_threads_events.event_bits[idx]
		   | event_maskp->event_bits[idx])) != 0)
	{
	  /* Lock the mutex the child will use now so that it will stop.  */
	  __pthread_lock(new_thread->p_lock, NULL);

	  /* We have to report this event.  */
#ifdef __ia64__
	  pid = __clone2(pthread_start_thread_event, new_thread_top,
			new_thread_top - new_thread_bottom,
			CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
			__pthread_sig_cancel, new_thread);
#else
	  pid = clone(pthread_start_thread_event, new_thread_top,
			CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
			__pthread_sig_cancel, new_thread);
#endif

	  saved_errno = errno;
	  if (pid != -1)
	    {
	      /* Now fill in the information about the new thread in
	         the newly created thread's data structure.  We cannot let
	         the new thread do this since we don't know whether it was
	         already scheduled when we send the event.  */
	      new_thread->p_eventbuf.eventdata = new_thread;
	      new_thread->p_eventbuf.eventnum = TD_CREATE;
	      __pthread_last_event = new_thread;

	      /* We have to set the PID here since the callback function
		 in the debug library will need it and we cannot guarantee
		 the child got scheduled before the debugger.  */
	      new_thread->p_pid = pid;

	      /* Now call the function which signals the event.  */
	      __linuxthreads_create_event ();

	      /* Now restart the thread.  */
	      __pthread_unlock(new_thread->p_lock);
	    }
	}
    }
  if (pid == 0)
    {
      PDEBUG("cloning new_thread = %p\n", new_thread);
#ifdef __ia64__
      pid = __clone2(pthread_start_thread, new_thread_top,
		    new_thread_top - new_thread_bottom,
		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
		    __pthread_sig_cancel, new_thread);
#else
      pid = clone(pthread_start_thread, new_thread_top,
		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
		    __pthread_sig_cancel, new_thread);
#endif
      saved_errno = errno;
    }
  /* Check if cloning succeeded */
  if (pid == -1) {
    /********************************************************
     * Code inserted to remove the thread from our list of active
     * threads in case of failure (needed to cope with uClinux),
     * See comment below. */
    new_thread->p_nextlive->p_prevlive = new_thread->p_prevlive;
    new_thread->p_prevlive->p_nextlive = new_thread->p_nextlive;
    /********************************************************/

    /* Free the stack if we allocated it */
    if (attr == NULL || !attr->__stackaddr_set)
      {
#ifdef __ARCH_USE_MMU__
	if (new_thread->p_guardsize != 0)
	  munmap(new_thread->p_guardaddr, new_thread->p_guardsize);
	munmap((caddr_t)((char *)(new_thread+1) - INITIAL_STACK_SIZE),
	       INITIAL_STACK_SIZE);
#else
	free(new_thread_bottom);
#endif /* __ARCH_USE_MMU__ */
      }
    __pthread_handles[sseg].h_descr = NULL;
    __pthread_handles[sseg].h_bottom = NULL;
    __pthread_handles_num--;
    return saved_errno;
  }
  PDEBUG("new thread pid = %d\n", pid);

#if 0
  /* ***********************************************************
   This code has been moved before the call to clone().  On uClinux,
   waiting on a semaphore requires the child to be in the active threads
   list, because that list is what pthread_find_self() uses to obtain the
   pthread_descr of self.  So if the child called sem_wait before this
   code executed, it would hang forever, and initial_thread would be
   posted by a sem_post call instead. */

  /* Insert new thread in doubly linked list of active threads */
  new_thread->p_prevlive = __pthread_main_thread;
  new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
  __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
  __pthread_main_thread->p_nextlive = new_thread;
  /************************************************************/
#endif

  /* Set pid field of the new thread, in case we get there before the
     child starts. */
  new_thread->p_pid = pid;
  /* We're all set */
  *thread = new_thread_id;
  return 0;
}