Example #1
/* Delete a key */
int pthread_key_delete(pthread_key_t key)
{
    pthread_descr self = thread_self();

    __pthread_mutex_lock(&pthread_keys_mutex);
    if (key >= PTHREAD_KEYS_MAX || !pthread_keys[key].in_use) {
	__pthread_mutex_unlock(&pthread_keys_mutex);
	return EINVAL;
    }
    pthread_keys[key].in_use = 0;
    pthread_keys[key].destr = NULL;

    /* Set the value of the key to NULL in all running threads, so
       that if the key is reallocated later by pthread_key_create, its
       associated values will be NULL in all threads.
       Do nothing if no threads have been created yet.  */
    if (__pthread_manager_request != -1)
    {
	pthread_descr th;
	unsigned int idx1st, idx2nd;

	idx1st = key / PTHREAD_KEY_2NDLEVEL_SIZE;
	idx2nd = key % PTHREAD_KEY_2NDLEVEL_SIZE;
	th = self;
	do {
	    /* If the thread has already terminated, don't modify the memory.  */
	    if (!th->p_terminated && th->p_specific[idx1st] != NULL)
		th->p_specific[idx1st][idx2nd] = NULL;
	    th = th->p_nextlive;
	} while (th != self);
    }

    __pthread_mutex_unlock(&pthread_keys_mutex);
    return 0;
}
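
The implementation above clears the deleted key's slot in every live thread, so a later pthread_key_create that reuses the same index starts out with NULL values everywhere. A minimal caller-side sketch of that lifecycle; the key, value, and function names are illustrative, not from the source:

/* Minimal sketch of the key lifecycle from the caller's side. */
#include <pthread.h>
#include <stdlib.h>

static pthread_key_t buf_key;           /* hypothetical key */

static void buf_destructor(void *p)     /* runs at thread exit for non-NULL values */
{
    free(p);
}

static void *worker(void *arg)
{
    (void)arg;
    pthread_setspecific(buf_key, malloc(64));   /* per-thread value */
    /* ... use pthread_getspecific(buf_key) ... */
    return NULL;                                /* destructor frees the buffer */
}

int main(void)
{
    pthread_t t;

    pthread_key_create(&buf_key, buf_destructor);
    pthread_create(&t, NULL, worker, NULL);
    pthread_join(t, NULL);

    /* After deletion the slot may be handed out again by a later
       pthread_key_create; the code above guarantees it then reads as NULL. */
    pthread_key_delete(buf_key);
    return 0;
}
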
Example #2
File: ptfork.c  Project: Jaden-J/uClibc
static pid_t __fork(void)
{
  pid_t pid;
  struct handler_list * prepare, * child, * parent;

  __pthread_mutex_lock(&pthread_atfork_lock);
  prepare = pthread_atfork_prepare;
  child = pthread_atfork_child;
  parent = pthread_atfork_parent;
  pthread_call_handlers(prepare);

  __pthread_once_fork_prepare();
#ifdef __MALLOC__
  __pthread_mutex_lock(&__malloc_sbrk_lock);
  __pthread_mutex_lock(&__malloc_heap_lock);
#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
  __pthread_mutex_lock(&__malloc_mmb_heap_lock);
#endif
#elif defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
  __pthread_mutex_lock(&__malloc_lock);
#endif

  pid = __libc_fork();
  if (pid == 0) {
#if defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
    __libc_lock_init_recursive(__malloc_lock);
#elif defined(__MALLOC__)
#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
    __libc_lock_init_adaptive(__malloc_mmb_heap_lock);
#endif
    __libc_lock_init_adaptive(__malloc_heap_lock);
    __libc_lock_init(__malloc_sbrk_lock);
#endif
    __libc_lock_init_adaptive(pthread_atfork_lock);
    __pthread_reset_main_thread();
    __fresetlockfiles();
    __pthread_once_fork_child();
    pthread_call_handlers(child);
  } else {
#if defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
    __pthread_mutex_unlock(&__malloc_lock);
#elif defined(__MALLOC__)
#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
    __pthread_mutex_unlock(&__malloc_mmb_heap_lock);
#endif
    __pthread_mutex_unlock(&__malloc_heap_lock);
    __pthread_mutex_unlock(&__malloc_sbrk_lock);
#endif
    __pthread_mutex_unlock(&pthread_atfork_lock);
    __pthread_once_fork_parent();
    pthread_call_handlers(parent);
  }
  return pid;
}
Example #3
int mtx_unlock(mtx_t *mtx)
{
	/* The only cases where pthread_mutex_unlock can return an
	 * error are undefined behavior for C11 mtx_unlock, so we can
	 * assume it does not return an error and simply tail call. */
	return __pthread_mutex_unlock(mtx);
}
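
The wrapper relies on the C11 rule that unlocking a mutex the calling thread does not hold is undefined behavior for mtx_unlock, so no pthread error code ever needs translating. A small caller-side sketch using <threads.h>, with hypothetical names:

/* Sketch of the C11 caller side; any unlock error would only arise in cases
   that are already undefined for mtx_unlock. */
#include <threads.h>
#include <stdio.h>

static mtx_t m;
static long counter;

static int bump(void *arg)
{
    (void)arg;
    mtx_lock(&m);       /* returns thrd_success on success */
    counter++;
    mtx_unlock(&m);     /* in an implementation like the one above, this
                           tail-calls the pthread unlock */
    return 0;
}

int main(void)
{
    thrd_t t;

    mtx_init(&m, mtx_plain);
    thrd_create(&t, bump, NULL);
    bump(NULL);
    thrd_join(t, NULL);
    mtx_destroy(&m);
    printf("%ld\n", counter);
    return 0;
}
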
Example #4
void
__funlockfile (FILE *stream)
{
#ifdef USE_IN_LIBIO
  __pthread_mutex_unlock (stream->_lock);
#else
#endif
}
Example #5
int pthread_key_create(pthread_key_t * key, destr_function destr)
{
  int i;

  __pthread_mutex_lock(&pthread_keys_mutex);
  for (i = 0; i < PTHREAD_KEYS_MAX; i++) {
    if (! pthread_keys[i].in_use) {
      /* Mark key in use */
      pthread_keys[i].in_use = 1;
      pthread_keys[i].destr = destr;
      __pthread_mutex_unlock(&pthread_keys_mutex);
      *key = i;
      return 0;
    }
  }
  __pthread_mutex_unlock(&pthread_keys_mutex);
  return EAGAIN;
}
Example #6
void
__pthread_destroy_specific (struct __pthread *thread)
{
  int i;
  int seen_one;

  /* Check if there is any thread specific data.  */
  if (thread->thread_specifics == NULL)
    return;

  __pthread_key_lock_ready ();

  /* Iterate and call the destructors on any thread specific data.  */
  for (;;)
    {
      seen_one = 0;

      __pthread_mutex_lock (&__pthread_key_lock);

      for (i = 0; i < __pthread_key_count && i < thread->thread_specifics_size;
	   i++)
	{
	  void *value;

	  if (__pthread_key_destructors[i] == PTHREAD_KEY_INVALID)
	    continue;

	  value = thread->thread_specifics[i];
	  if (value != NULL)
	    {
	      thread->thread_specifics[i] = 0;

	      if (__pthread_key_destructors[i])
		{
		  seen_one = 1;
		  __pthread_key_destructors[i] (value);
		}
	    }
	}

      __pthread_mutex_unlock (&__pthread_key_lock);

      if (!seen_one)
	break;

      /* This may take a very long time.  Let those blocking on
         pthread_key_create or pthread_key_delete make progress.  */
      sched_yield ();
    }

  free (thread->thread_specifics);
  thread->thread_specifics = 0;
  thread->thread_specifics_size = 0;
}
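
The outer for (;;) loop exists because a destructor may itself store fresh thread-specific data, so destruction has to repeat until a full pass runs no destructor at all. A hypothetical sketch of the situation that forces a second pass; all names are illustrative:

/* Why one pass is not enough: a destructor re-creates thread-specific data
   under another key while destructors are already running. */
#include <pthread.h>
#include <stdlib.h>

static pthread_key_t log_key, scratch_key;   /* hypothetical keys */

static void scratch_destructor(void *p)
{
    free(p);
}

static void log_destructor(void *p)
{
    /* Flushing the hypothetical log needs a scratch buffer, so new
       thread-specific data appears mid-destruction and a later pass
       must run scratch_destructor for it. */
    pthread_setspecific(scratch_key, malloc(128));
    free(p);
}

static void *worker(void *arg)
{
    (void)arg;
    pthread_setspecific(log_key, malloc(32));
    return NULL;    /* thread exit runs log_destructor, then scratch_destructor */
}

int main(void)
{
    pthread_t t;

    pthread_key_create(&scratch_key, scratch_destructor);
    pthread_key_create(&log_key, log_destructor);
    pthread_create(&t, NULL, worker, NULL);
    pthread_join(t, NULL);
    return 0;
}
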
Example #7
void
__pthread_destroy_specific (struct __pthread *thread)
{
  error_t err;
  int i;
  int seen_one;

  /* Check if there is any thread specific data.  */
  if (! thread->thread_specifics)
    return;

  __pthread_key_lock_ready ();

  /* Iterate and call the destructors on any thread specific data.  */
  for (;;)
    {
      seen_one = 0;

      __pthread_mutex_lock (&__pthread_key_lock);

      for (i = 0; i < __pthread_key_count; i ++)
	{
	  void *value;

	  if (__pthread_key_destructors[i] == PTHREAD_KEY_INVALID)
	    continue;

	  value = hurd_ihash_find (thread->thread_specifics, i);
	  if (value)
	    {
	      err = hurd_ihash_remove (thread->thread_specifics, i);
	      assert (err == 1);

	      if (__pthread_key_destructors[i])
		{
		  seen_one = 1;
		  __pthread_key_destructors[i] (value);
		}
	    }
	}

      __pthread_mutex_unlock (&__pthread_key_lock);

      if (! seen_one)
	break;

      /* This may take a very long time.  Let those blocking on
	 pthread_key_create or pthread_key_delete make progress.  */
      sched_yield ();
    }

  hurd_ihash_free (thread->thread_specifics);
  thread->thread_specifics = 0;
}
Example #8
/* rewinddir() just does an lseek(fd,0,0) - see close for comments */
void rewinddir(DIR * dir)
{
	if (!dir) {
		__set_errno(EBADF);
		return;
	}
#ifdef __UCLIBC_HAS_THREADS__
	__pthread_mutex_lock(&(dir->dd_lock));
#endif
	lseek(dir->dd_fd, 0, SEEK_SET);
	dir->dd_nextoff = dir->dd_nextloc = dir->dd_size = 0;
#ifdef __UCLIBC_HAS_THREADS__
	__pthread_mutex_unlock(&(dir->dd_lock));
#endif
}
Example #9
File: ptfork.c  Project: Jaden-J/uClibc
int pthread_atfork(void (*prepare)(void),
		     void (*parent)(void),
		     void (*child)(void))
{
  struct handler_list_block * block =
    (struct handler_list_block *) malloc(sizeof(struct handler_list_block));
  if (block == NULL) return ENOMEM;
  __pthread_mutex_lock(&pthread_atfork_lock);
  /* "prepare" handlers are called in LIFO */
  pthread_insert_list(&pthread_atfork_prepare, prepare, &block->prepare, 0);
  /* "parent" handlers are called in FIFO */
  pthread_insert_list(&pthread_atfork_parent, parent, &block->parent, 1);
  /* "child" handlers are called in FIFO */
  pthread_insert_list(&pthread_atfork_child, child, &block->child, 1);
  __pthread_mutex_unlock(&pthread_atfork_lock);
  return 0;
}
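
The comments spell out the ordering: prepare handlers run LIFO, parent and child handlers run FIFO. A caller-side sketch with a hypothetical module lock, following the same pattern __fork above uses for the malloc locks (take the lock before forking, release it in the parent, reset it in the child):

/* Keep a hypothetical module lock consistent across fork(). */
#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t mod_lock = PTHREAD_MUTEX_INITIALIZER;   /* hypothetical */

static void mod_prepare(void) { pthread_mutex_lock(&mod_lock); }
static void mod_parent(void)  { pthread_mutex_unlock(&mod_lock); }
static void mod_child(void)
{
    /* The child starts with a single thread; reinitializing puts the lock in
       a known state, mirroring what __fork above does for the malloc locks. */
    pthread_mutex_init(&mod_lock, NULL);
}

int main(void)
{
    pthread_atfork(mod_prepare, mod_parent, mod_child);

    pid_t pid = fork();     /* handlers fire around the underlying fork */
    if (pid == 0)
        _exit(0);           /* child: mod_lock was reset by mod_child */
    return 0;
}
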
Example #10
struct dirent *readdir(DIR * dir)
{
    ssize_t bytes;
    struct dirent *de;

    if (!dir) {
        __set_errno(EBADF);
        return NULL;
    }

#ifdef __UCLIBC_HAS_THREADS__
    __pthread_mutex_lock(&(dir->dd_lock));
#endif

    do {
        if (dir->dd_size <= dir->dd_nextloc) {
            /* read dir->dd_max bytes of directory entries. */
            bytes = __getdents(dir->dd_fd, dir->dd_buf, dir->dd_max);
            if (bytes <= 0) {
                de = NULL;
                goto all_done;
            }
            dir->dd_size = bytes;
            dir->dd_nextloc = 0;
        }

        de = (struct dirent *) (((char *) dir->dd_buf) + dir->dd_nextloc);

        /* Am I right? H.J. */
        dir->dd_nextloc += de->d_reclen;

        /* We have to save the next offset here. */
        dir->dd_nextoff = de->d_off;

        /* Skip deleted files.  */
    } while (de->d_ino == 0);

all_done:
#ifdef __UCLIBC_HAS_THREADS__
    __pthread_mutex_unlock(&(dir->dd_lock));
#endif
    return de;
}
Example #11
File: regex.c  Project: cgzones/selinux
int regex_match(struct regex_data *regex, char const *subject, int partial)
{
	int rc;
	__pthread_mutex_lock(&regex->match_mutex);
	rc = pcre2_match(
	    regex->regex, (PCRE2_SPTR)subject, PCRE2_ZERO_TERMINATED, 0,
	    partial ? PCRE2_PARTIAL_SOFT : 0, regex->match_data, NULL);
	__pthread_mutex_unlock(&regex->match_mutex);
	if (rc > 0)
		return REGEX_MATCH;
	switch (rc) {
	case PCRE2_ERROR_PARTIAL:
		return REGEX_MATCH_PARTIAL;
	case PCRE2_ERROR_NOMATCH:
		return REGEX_NO_MATCH;
	default:
		return REGEX_ERROR;
	}
}
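
The mutex serializes the pcre2_match calls because a pcre2_match_data block must not be used by two threads at once, while the compiled pattern itself is read-only and shareable. A sketch of the same pattern with plain PCRE2 calls; the names are illustrative, not from the selinux source:

/* Share one compiled pattern and one match_data block across threads,
   serializing only the matching step. */
#define PCRE2_CODE_UNIT_WIDTH 8
#include <pcre2.h>
#include <pthread.h>

static pthread_mutex_t match_mutex = PTHREAD_MUTEX_INITIALIZER;
static pcre2_code *code;               /* compiled once, read-only afterwards */
static pcre2_match_data *match_data;   /* shared, hence the mutex */

int shared_init(const char *pattern)
{
    int errcode;
    PCRE2_SIZE erroffset;

    code = pcre2_compile((PCRE2_SPTR)pattern, PCRE2_ZERO_TERMINATED, 0,
                         &errcode, &erroffset, NULL);
    if (code == NULL)
        return -1;
    match_data = pcre2_match_data_create_from_pattern(code, NULL);
    return match_data ? 0 : -1;
}

int shared_match(const char *subject)
{
    int rc;

    pthread_mutex_lock(&match_mutex);
    rc = pcre2_match(code, (PCRE2_SPTR)subject, PCRE2_ZERO_TERMINATED,
                     0, 0, match_data, NULL);
    pthread_mutex_unlock(&match_mutex);
    return rc;
}
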
Example #12
int
__pthread_setcanceltype (int type, int *oldtype)
{
  struct __pthread *p = _pthread_self ();

  switch (type)
    {
    default:
      return EINVAL;
    case PTHREAD_CANCEL_DEFERRED:
    case PTHREAD_CANCEL_ASYNCHRONOUS:
      break;
    }

  __pthread_mutex_lock (&p->cancel_lock);
  if (oldtype)
    *oldtype = p->cancel_type;
  p->cancel_type = type;
  __pthread_mutex_unlock (&p->cancel_lock);

  return 0;
}
Example #13
int
__pthread_setcancelstate (int state, int *oldstate)
{
    struct __pthread *p = _pthread_self ();

    switch (state)
    {
    default:
        return EINVAL;
    case PTHREAD_CANCEL_ENABLE:
    case PTHREAD_CANCEL_DISABLE:
        break;
    }

    __pthread_mutex_lock (&p->cancel_lock);
    if (oldstate)
        *oldstate = p->cancel_state;
    p->cancel_state = state;
    __pthread_mutex_unlock (&p->cancel_lock);

    return 0;
}
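
Both wrappers only flip a field under the thread's cancel_lock; the usual reason a caller touches them is to disable cancellation around a region that must not be unwound part-way, then restore whatever the previous setting was. A caller-side sketch with a hypothetical work function:

/* Disable cancellation around a non-cancellation-safe region. */
#include <pthread.h>
#include <stdio.h>

static void update_shared_state(void)   /* hypothetical work that must complete */
{
    puts("updating");
}

void cancellation_safe_update(void)
{
    int oldstate;

    pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
    update_shared_state();                   /* no cancellation point acts here */
    pthread_setcancelstate(oldstate, NULL);  /* restore the caller's setting */
}

int main(void)
{
    cancellation_safe_update();
    return 0;
}
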
Example #14
/* Block on condition variable COND until ABSTIME.  As a GNU
   extension, if ABSTIME is NULL, then wait forever.  MUTEX should be
   held by the calling thread.  On return, MUTEX will be held by the
   calling thread.  */
int
__pthread_cond_timedwait_internal (pthread_cond_t *cond,
				   pthread_mutex_t *mutex,
				   const struct timespec *abstime)
{
  error_t err;
  int cancelled, oldtype, drain;
  clockid_t clock_id = __pthread_default_condattr.__clock;

  if (abstime && (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000))
    return EINVAL;

  struct __pthread *self = _pthread_self ();
  struct cancel_ctx ctx;
  ctx.wakeup = self;
  ctx.cond = cond;

  /* Test for a pending cancellation request, switch to deferred mode for
     safer resource handling, and prepare the hook to call in case we're
     cancelled while blocking.  Once CANCEL_LOCK is released, the cancellation
     hook can be called by another thread at any time.  Whatever happens,
     this function must exit with MUTEX locked.

     This function contains inline implementations of pthread_testcancel and
     pthread_setcanceltype to reduce locking overhead.  */
  __pthread_mutex_lock (&self->cancel_lock);
  cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
      && self->cancel_pending;

  if (!cancelled)
    {
      self->cancel_hook = cancel_hook;
      self->cancel_hook_arg = &ctx;
      oldtype = self->cancel_type;

      if (oldtype != PTHREAD_CANCEL_DEFERRED)
	self->cancel_type = PTHREAD_CANCEL_DEFERRED;

      /* Add ourselves to the list of waiters.  This is done while setting
         the cancellation hook to simplify the cancellation procedure, i.e.
         if the thread is queued, it can be cancelled, otherwise it is
         already unblocked, progressing on the return path.  */
      __pthread_spin_lock (&cond->__lock);
      __pthread_enqueue (&cond->__queue, self);
      if (cond->__attr != NULL)
	clock_id = cond->__attr->__clock;
      __pthread_spin_unlock (&cond->__lock);
    }
  __pthread_mutex_unlock (&self->cancel_lock);

  if (cancelled)
    __pthread_exit (PTHREAD_CANCELED);

  /* Release MUTEX before blocking.  */
  __pthread_mutex_unlock (mutex);

  /* Block the thread.  */
  if (abstime != NULL)
    err = __pthread_timedblock (self, abstime, clock_id);
  else
    {
      err = 0;
      __pthread_block (self);
    }

  __pthread_spin_lock (&cond->__lock);
  if (self->prevp == NULL)
    {
      /* Another thread removed us from the list of waiters, which means a
         wakeup message has been sent.  It was either consumed while we were
         blocking, or queued after we timed out and before we acquired the
         condition lock, in which case the message queue must be drained.  */
      if (!err)
	drain = 0;
      else
	{
	  assert (err == ETIMEDOUT);
	  drain = 1;
	}
    }
  else
    {
      /* We're still in the list of waiters.  No one attempted to wake us up,
         i.e. we timed out.  */
      assert (err == ETIMEDOUT);
      __pthread_dequeue (self);
      drain = 0;
    }
  __pthread_spin_unlock (&cond->__lock);

  if (drain)
    __pthread_block (self);

  /* We're almost done.  Remove the unblock hook, restore the previous
     cancellation type, and check for a pending cancellation request.  */
  __pthread_mutex_lock (&self->cancel_lock);
  self->cancel_hook = NULL;
  self->cancel_hook_arg = NULL;
  self->cancel_type = oldtype;
  cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
      && self->cancel_pending;
  __pthread_mutex_unlock (&self->cancel_lock);

  /* Reacquire MUTEX before returning/cancelling.  */
  __pthread_mutex_lock (mutex);

  if (cancelled)
    __pthread_exit (PTHREAD_CANCELED);

  return err;
}
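
The function blocks until ABSTIME on the condition variable's clock (typically CLOCK_REALTIME unless a condattr selected another), waits forever when ABSTIME is NULL, and holds MUTEX again on return even when the wait timed out. A caller-side sketch that computes a one-second absolute deadline and rechecks a hypothetical predicate on every wakeup:

/* Wait for a flag with an absolute one-second deadline. */
#include <pthread.h>
#include <time.h>
#include <errno.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int ready;                        /* hypothetical predicate */

int wait_for_ready(void)
{
    struct timespec abstime;
    int err = 0;

    clock_gettime(CLOCK_REALTIME, &abstime);   /* default condvar clock */
    abstime.tv_sec += 1;

    pthread_mutex_lock(&lock);
    while (!ready && err != ETIMEDOUT)
        err = pthread_cond_timedwait(&cond, &lock, &abstime);
    pthread_mutex_unlock(&lock);               /* held again on return, as above */
    return ready ? 0 : ETIMEDOUT;
}

static void *setter(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    ready = 1;
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, setter, NULL);
    wait_for_ready();
    pthread_join(t, NULL);
    return 0;
}
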