Example #1
gboolean dt_control_gdk_lock()
{
  /* if current thread equals gui thread do nothing */
  if(pthread_equal(darktable.control->gui_thread, pthread_self()) != 0) return FALSE;

  dt_pthread_mutex_lock(&_control_gdk_lock_threads_mutex);

  /* lets check if current thread has a managed lock */
  if(_control_gdk_lock_mine)
  {
    /* current thread has a lock just do nothing */
    dt_pthread_mutex_unlock(&_control_gdk_lock_threads_mutex);
    return FALSE;
  }

  /* lets lock */
  _control_gdk_lock_mine = TRUE;
  dt_pthread_mutex_unlock(&_control_gdk_lock_threads_mutex);

  /* enter gdk critical section */
  gdk_threads_enter();

  return TRUE;
}
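A worker thread normally pairs this call with a matching unlock and releases only when the call above reported that it actually acquired the GDK lock. A minimal caller sketch, assuming a dt_control_gdk_unlock() counterpart exists in the same module:

/* hypothetical worker-thread caller: release only if this thread took the lock */
static void worker_update_gui(void)
{
  const gboolean i_own_lock = dt_control_gdk_lock();

  /* ... GTK/GDK calls are safe to make here ... */

  if(i_own_lock) dt_control_gdk_unlock(); /* assumed counterpart to the function above */
}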
Example #2
WebSocket::~WebSocket(){
	
	if(readyState != CLOSED){
		this->fire("close", NULL);
		this->proxy_fire("close", NULL);
		
		readyState = CLOSED;
	}
	CCAssert(!pthread_equal(m_networkThread, pthread_self()), "websocket instance should not release in sub thread!");

	pthread_mutex_lock(&m_responseQueueMutex);
	CCObject* obj;
	CCARRAY_FOREACH(m_responseMessage, obj){
		obj->release();
	}
	m_responseMessage->release();
	pthread_mutex_unlock(&m_responseQueueMutex);
	pthread_mutex_destroy(&m_responseQueueMutex);

	if(NULL != s_pool && s_pool->containsObject(this)){
		pthread_mutex_lock(&s_socketsMutex);
		s_pool->removeObject(this);
		pthread_mutex_unlock(&s_socketsMutex);
	}

	if(NULL != s_pool && s_pool->count() <= 0){
		
		pthread_mutex_lock(&s_requestQueueMutex);

		//s_requestMessageQueue->removeAllObjects();
		CCObject* obj;
		CCARRAY_FOREACH(s_requestMessageQueue, obj){
			s_requestMessageQueue->removeObject(obj);
			JsonData* msg = (JsonData*)obj;
			CC_SAFE_DELETE(msg);
		}
		pthread_mutex_unlock(&s_requestQueueMutex);	/* assumed completion; the original excerpt is truncated here */
	}
}
Example #3
static void
thread_fini(void)
{
	kthread_t *kt = curthread;

	ASSERT(pthread_equal(kt->t_tid, pthread_self()));
	ASSERT3P(kt->t_func, ==, NULL);

	umem_free(kt, sizeof(kthread_t));

	/* Wait for all threads to exit via thread_exit() */
	VERIFY3S(pthread_mutex_lock(&kthread_lock), ==, 0);

	kthread_nr--; /* Main thread is exiting */

	while (kthread_nr > 0)
		VERIFY3S(pthread_cond_wait(&kthread_cond, &kthread_lock), ==,
		    0);

	ASSERT3S(kthread_nr, ==, 0);
	VERIFY3S(pthread_mutex_unlock(&kthread_lock), ==, 0);

	VERIFY3S(pthread_key_delete(kthread_key), ==, 0);
}
Example #4
int
gp_monitor_leave(gp_monitor * mona)
{
    pthread_mutex_t * const mon = (pthread_mutex_t *)mona;
    int scode = 0;

#ifdef GS_RECURSIVE_MUTEXATTR
    scode = pthread_mutex_unlock(mon);
#else
    assert(((gp_pthread_recursive_t *)mona)->lcount > 0 && ((gp_pthread_recursive_t *)mona)->self_id != 0);

    if (pthread_equal(pthread_self(),((gp_pthread_recursive_t *)mona)->self_id)) {
      if ((--((gp_pthread_recursive_t *)mona)->lcount) == 0) {
          ((gp_pthread_recursive_t *)mona)->self_id = 0;	/* Not valid unless mutex is locked */
          scode = pthread_mutex_unlock(mon);

      }
    }
    else {
        scode = -1 /* should be EPERM */;
    }
#endif
    return SEM_ERROR_CODE(scode);
}
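For the path without GS_RECURSIVE_MUTEXATTR, the matching enter side has to record the owning thread and the nesting count that this function checks. A minimal sketch of what such a gp_monitor_enter() could look like under that assumption (the real implementation may differ):

int
gp_monitor_enter(gp_monitor * mona)
{
    pthread_mutex_t * const mon = (pthread_mutex_t *)mona;
    gp_pthread_recursive_t * const rec = (gp_pthread_recursive_t *)mona;
    int scode = 0;

    if (rec->lcount > 0 && pthread_equal(pthread_self(), rec->self_id)) {
        rec->lcount++;                     /* nested entry by the owner: just bump the count */
    } else {
        scode = pthread_mutex_lock(mon);   /* first entry: take the underlying mutex */
        if (scode == 0) {
            rec->self_id = pthread_self(); /* record the owner while the mutex is held */
            rec->lcount = 1;
        }
    }
    return SEM_ERROR_CODE(scode);
}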
Example #5
/**
 * Function: faceproc_comp_abort
 *
 * Description: Aborts the execution of faceproc
 *
 * Input parameters:
 *   handle - The pointer to the component handle.
 *   p_data - The pointer to the command structure. The structure
 *            for each command type is defined in denoise.h
 *
 * Return values:
 *     IMG_SUCCESS
 *
 * Notes: none
 **/
int faceproc_comp_abort(void *handle, void *p_data)
{
  faceproc_comp_t *p_comp = (faceproc_comp_t *)handle;
  img_component_t *p_base = (img_component_t *)handle;
  int status = IMG_SUCCESS;

  IDBG_HIGH("%s:%d] state %d", __func__, __LINE__, p_base->state);
  pthread_mutex_lock(&p_base->mutex);
  if (IMG_STATE_STARTED != p_base->state) {
    pthread_mutex_unlock(&p_base->mutex);
    return IMG_SUCCESS;
  }
  p_base->state = IMG_STATE_STOP_REQUESTED;
  pthread_mutex_unlock(&p_base->mutex);
  /*signal the thread*/
  img_q_signal(&p_base->inputQ);

  if (!pthread_equal(pthread_self(), p_base->threadid)) {
    IDBG_MED("%s:%d] thread id %d %d", __func__, __LINE__,
      (uint32_t)pthread_self(), (uint32_t)p_base->threadid);
    pthread_join(p_base->threadid, NULL);
  }

  /* destroy the handle */
  status = faceproc_comp_eng_destroy(p_comp);
  if (IMG_ERROR(status)) {
    IDBG_ERROR("%s:%d] failed", __func__, __LINE__);
    return status;
  }

  pthread_mutex_lock(&p_base->mutex);
  p_base->state = IMG_STATE_INIT;
  pthread_mutex_unlock(&p_base->mutex);
  IDBG_HIGH("%s:%d] X", __func__, __LINE__);
  return status;
}
Example #6
/*
 * Lock for write access, wait until locked (or error).
 *   Multiple nested write locking is permitted.
 */
int rwl_writelock_p(brwlock_t *rwl, const char *file, int line)
{
   int status;

   if (rwl->valid != RWLOCK_VALID) {
      return EINVAL;
   }
   if ((status = pthread_mutex_lock(&rwl->mutex)) != 0) {
      return status;
   }
   if (rwl->w_active && pthread_equal(rwl->writer_id, pthread_self())) {
      rwl->w_active++;
      pthread_mutex_unlock(&rwl->mutex);
      return 0;
   }
   lmgr_pre_lock(rwl, rwl->priority, file, line);
   if (rwl->w_active || rwl->r_active > 0) {
      rwl->w_wait++;                  /* indicate that we are waiting */
      pthread_cleanup_push(rwl_write_release, (void *)rwl);
      while (rwl->w_active || rwl->r_active > 0) {
         if ((status = pthread_cond_wait(&rwl->write, &rwl->mutex)) != 0) {
            lmgr_do_unlock(rwl);
            break;                    /* error, bail out */
         }
      }
      pthread_cleanup_pop(0);
      rwl->w_wait--;                  /* we are no longer waiting */
   }
   if (status == 0) {
      rwl->w_active++;                /* we are running */
      rwl->writer_id = pthread_self(); /* save writer thread's id */
      lmgr_post_lock();
   }
   pthread_mutex_unlock(&rwl->mutex);
   return status;
}
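Because the writer id is matched with pthread_equal(), the thread that already holds the write lock may nest further write locks, as the header comment states; each nested lock must be balanced by an unlock. A minimal usage sketch, assuming a matching rwl_writeunlock() that decrements w_active:

/* hypothetical caller: nested write locks taken by one thread, released in reverse */
static void update_shared_state(brwlock_t *rwl)
{
   rwl_writelock_p(rwl, __FILE__, __LINE__);   /* outer lock: w_active becomes 1 */
   rwl_writelock_p(rwl, __FILE__, __LINE__);   /* nested lock by the same thread: w_active becomes 2 */

   /* ... modify the protected data ... */

   rwl_writeunlock(rwl);                       /* assumed counterpart; w_active drops back to 1 */
   rwl_writeunlock(rwl);                       /* fully released; waiting readers/writers may proceed */
}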
Example #7
static void mutex_abandon (gpointer handle, pid_t pid, pthread_t tid)
{
	struct _WapiHandle_mutex *mutex_handle;
	gboolean ok;
	int thr_ret;
	
	ok = _wapi_lookup_handle (handle, WAPI_HANDLE_MUTEX,
				  (gpointer *)&mutex_handle);
	if (ok == FALSE) {
		g_warning ("%s: error looking up mutex handle %p", __func__,
			   handle);
		return;
	}

	pthread_cleanup_push ((void(*)(void *))_wapi_handle_unlock_handle,
			      handle);
	thr_ret = _wapi_handle_lock_handle (handle);
	g_assert (thr_ret == 0);
	
	if (mutex_handle->pid == pid &&
	    pthread_equal (mutex_handle->tid, tid)) {
#ifdef DEBUG
		g_message ("%s: Mutex handle %p abandoned!", __func__, handle);
#endif

		mutex_handle->recursion = 0;
		mutex_handle->pid = 0;
		mutex_handle->tid = 0;
		
		_wapi_handle_set_signal_state (handle, TRUE, FALSE);
	}

	thr_ret = _wapi_handle_unlock_handle (handle);
	g_assert (thr_ret == 0);
	pthread_cleanup_pop (0);
}
Example #8
void _mosquitto_destroy(struct mosquitto *mosq)
{
	struct _mosquitto_packet *packet;
	if(!mosq) return;

#ifdef WITH_THREADING
	if(mosq->threaded && !pthread_equal(mosq->thread_id, pthread_self())){
		pthread_cancel(mosq->thread_id);
		pthread_join(mosq->thread_id, NULL);
		mosq->threaded = false;
	}

	if(mosq->id){
		/* If mosq->id is not NULL then the client has already been initialised
		 * and so the mutexes need destroying. If mosq->id is NULL, the mutexes
		 * haven't been initialised. */
		pthread_mutex_destroy(&mosq->callback_mutex);
		pthread_mutex_destroy(&mosq->log_callback_mutex);
		pthread_mutex_destroy(&mosq->state_mutex);
		pthread_mutex_destroy(&mosq->out_packet_mutex);
		pthread_mutex_destroy(&mosq->current_out_packet_mutex);
		pthread_mutex_destroy(&mosq->msgtime_mutex);
		pthread_mutex_destroy(&mosq->in_message_mutex);
		pthread_mutex_destroy(&mosq->out_message_mutex);
		pthread_mutex_destroy(&mosq->mid_mutex);
	}
#endif
	if(mosq->sock != INVALID_SOCKET){
		_mosquitto_socket_close(mosq);
	}
	_mosquitto_message_cleanup_all(mosq);
	_mosquitto_will_clear(mosq);
#ifdef WITH_TLS
	if(mosq->ssl){
		SSL_free(mosq->ssl);
	}
	if(mosq->ssl_ctx){
		SSL_CTX_free(mosq->ssl_ctx);
	}
	if(mosq->tls_cafile) _mosquitto_free(mosq->tls_cafile);
	if(mosq->tls_capath) _mosquitto_free(mosq->tls_capath);
	if(mosq->tls_certfile) _mosquitto_free(mosq->tls_certfile);
	if(mosq->tls_keyfile) _mosquitto_free(mosq->tls_keyfile);
	if(mosq->tls_pw_callback) mosq->tls_pw_callback = NULL;
	if(mosq->tls_version) _mosquitto_free(mosq->tls_version);
	if(mosq->tls_ciphers) _mosquitto_free(mosq->tls_ciphers);
	if(mosq->tls_psk) _mosquitto_free(mosq->tls_psk);
	if(mosq->tls_psk_identity) _mosquitto_free(mosq->tls_psk_identity);
#endif

	if(mosq->address){
		_mosquitto_free(mosq->address);
		mosq->address = NULL;
	}
	if(mosq->id){
		_mosquitto_free(mosq->id);
		mosq->id = NULL;
	}
	if(mosq->username){
		_mosquitto_free(mosq->username);
		mosq->username = NULL;
	}
	if(mosq->password){
		_mosquitto_free(mosq->password);
		mosq->password = NULL;
	}
	if(mosq->host){
		_mosquitto_free(mosq->host);
		mosq->host = NULL;
	}
	if(mosq->bind_address){
		_mosquitto_free(mosq->bind_address);
		mosq->bind_address = NULL;
	}

	/* Out packet cleanup */
	if(mosq->out_packet && !mosq->current_out_packet){
		mosq->current_out_packet = mosq->out_packet;
		mosq->out_packet = mosq->out_packet->next;
	}
	while(mosq->current_out_packet){
		packet = mosq->current_out_packet;
		/* Free data and reset values */
		mosq->current_out_packet = mosq->out_packet;
		if(mosq->out_packet){
			mosq->out_packet = mosq->out_packet->next;
		}

		_mosquitto_packet_cleanup(packet);
		_mosquitto_free(packet);
	}

	_mosquitto_packet_cleanup(&mosq->in_packet);
	if(mosq->sockpairR != INVALID_SOCKET){
		COMPAT_CLOSE(mosq->sockpairR);
		mosq->sockpairR = INVALID_SOCKET;
	}
	if(mosq->sockpairW != INVALID_SOCKET){
		COMPAT_CLOSE(mosq->sockpairW);
		mosq->sockpairW = INVALID_SOCKET;
	}
}
Example #9
int
pthread_cancel (pthread_t thread)
     /*
      * ------------------------------------------------------
      * DOCPUBLIC
      *      This function requests cancellation of 'thread'.
      *
      * PARAMETERS
      *      thread
      *              reference to an instance of pthread_t
      *
      *
      * DESCRIPTION
      *      This function requests cancellation of 'thread'.
      *      NOTE: cancellation is asynchronous; use pthread_join to
      *                wait for termination of 'thread' if necessary.
      *
      * RESULTS
      *              0               successfully requested cancellation,
      *              ESRCH           no thread found corresponding to 'thread',
      *              ENOMEM          implicit self thread create failed.
      * ------------------------------------------------------
      */
{
  int result;
  int cancel_self;
  pthread_t self;
  ptw32_thread_t * tp;

  result = pthread_kill (thread, 0);

  if (0 != result)
    {
      return result;
    }

  if ((self = pthread_self ()).p == NULL)
    {
      return ENOMEM;
    };

  /*
   * FIXME!!
   *
   * Can a thread cancel itself?
   *
   * The standard doesn't
   * specify an error to be returned if the target
   * thread is itself.
   *
   * If it may, then we need to ensure that a thread can't
   * deadlock itself trying to cancel itself asynchronously
   * (pthread_cancel is required to be an async-cancel
   * safe function).
   */
  cancel_self = pthread_equal (thread, self);

  tp = (ptw32_thread_t *) thread.p;

  /*
   * Lock for async-cancel safety.
   */
  (void) pthread_mutex_lock (&tp->cancelLock);

  if (tp->cancelType == PTHREAD_CANCEL_ASYNCHRONOUS
      && tp->cancelState == PTHREAD_CANCEL_ENABLE
      && tp->state < PThreadStateCanceling)
    {
      if (cancel_self)
	{
	  tp->state = PThreadStateCanceling;
	  tp->cancelState = PTHREAD_CANCEL_DISABLE;

	  (void) pthread_mutex_unlock (&tp->cancelLock);
	  ptw32_throw (PTW32_EPS_CANCEL);

	  /* Never reached */
	}
      else
	{
	  HANDLE threadH = tp->threadH;

	  SuspendThread (threadH);

	  if (WaitForSingleObject (threadH, 0) == WAIT_TIMEOUT)
	    {
	      tp->state = PThreadStateCanceling;
	      tp->cancelState = PTHREAD_CANCEL_DISABLE;
	      /*
	       * If alertdrv and QueueUserAPCEx is available then the following
	       * will result in a call to QueueUserAPCEx with the args given, otherwise
	       * this will result in a call to ptw32_RegisterCancelation and only
	       * the threadH arg will be used.
	       */
	      ptw32_register_cancelation (ptw32_cancel_callback, threadH, 0);
	      (void) pthread_mutex_unlock (&tp->cancelLock);
	      //ResumeThread (threadH);
	    }
	}
    }
  else
    {
      /*
       * Set for deferred cancellation.
       */
      if (tp->state < PThreadStateCancelPending)
	{
	  tp->state = PThreadStateCancelPending;
	  if (!SetEvent (tp->cancelEvent))
	    {
	      result = ESRCH;
	    }
	}
      else if (tp->state >= PThreadStateCanceling)
	{
	  result = ESRCH;
	}

      (void) pthread_mutex_unlock (&tp->cancelLock);
    }
  return (result);
}
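As the DOCPUBLIC block notes, cancellation is only requested here; a caller that must know the thread is gone still has to join it. A minimal, standard-POSIX usage sketch:

/* sketch of the documented pattern: request cancellation, then join to wait for it */
void cancel_and_reap (pthread_t worker)
{
  void *retval;

  if (pthread_cancel (worker) == 0)    /* request only; 'worker' may still be running */
    {
      pthread_join (worker, &retval);  /* wait for the cancellation to complete */
      if (retval == PTHREAD_CANCELED)
        {
          /* the worker honoured the cancellation request */
        }
    }
}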
Example #10
bool CThread::IsCurrentThread(const ThreadIdentifier tid)
{
  return pthread_equal(pthread_self(), tid);
}
Example #11
inline bool equal_systemwide_thread_id(const OS_systemwide_thread_id_t &id1, const OS_systemwide_thread_id_t &id2)
{
   return (0 != pthread_equal(id1.tid, id2.tid)) && (id1.pid == id2.pid);
}
Example #12
/// Returns true if called outside the view thread.
static bool need_safe_call() { return pthread_equal(view_thread->thread, pthread_self()) == 0; }
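A predicate like this typically decides whether a call can run directly or must be handed over to the owning thread. A minimal hedged sketch; post_to_view_thread() and do_refresh() are hypothetical helpers, not part of the original code:

/* dispatch directly when on the view thread, otherwise queue the work for it */
static void refresh_view(void) {
    if (need_safe_call())
        post_to_view_thread(do_refresh); /* hypothetical: marshal the call to the view thread */
    else
        do_refresh();                    /* hypothetical: already on the view thread, call directly */
}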
Example #13
	/*! Thread inequivalence operator.
	  \param that the other thread id
	  \return true if the threads are unequal */
	bool operator!=(const _threadbase& that) const { return !pthread_equal(_tid, that._tid); }
Example #14
tEplKernel PUBLIC EplApiProcessImageExchange(
    tEplApiProcessImageCopyJob* pCopyJob_p)
{
    tEplKernel                      Ret = kEplSuccessful;
    tEplApiProcessImageCopyJobInt   IntCopyJob;

#if (TARGET_SYSTEM == _LINUX_) && defined(__KERNEL__)
    if (EplApiProcessImageInstance_g.m_pCurrentTask == get_current())
#elif (TARGET_SYSTEM == _LINUX_) && !defined(__KERNEL__)
    if (pthread_equal(EplApiProcessImageInstance_g.m_currentThreadId, pthread_self()))
#elif (TARGET_SYSTEM == _WIN32_)
    if (EplApiProcessImageInstance_g.m_dwCurrentThreadId == GetCurrentThreadId())
#elif (TARGET_SYSTEM == _VXWORKS_)
    if (EplApiProcessImageInstance_g.m_currentThreadId == taskIdSelf())
#else
#error "OS currently not supported by EplApiProcessImage!"
#endif
    {
        Ret = EplApiProcessImageExchangeInt(pCopyJob_p);
        if (Ret != kEplSuccessful)
        {
            goto Exit;
        }
        goto Exit;
    }

    if ((EplApiProcessImageInstance_g.m_In.m_uiSize == 0)
            || (EplApiProcessImageInstance_g.m_Out.m_uiSize == 0))
    {   // the process image has been freed
        // therefore, indicate shutdown to application thread
        Ret = kEplShutdown;
        goto Exit;
    }

    IntCopyJob.m_CopyJob = *pCopyJob_p;

    if (pCopyJob_p->m_fNonBlocking == FALSE)
    {
        Ret = EplApiProcessImageCreateCompletion(&IntCopyJob);
        if (Ret != kEplSuccessful)
        {
            EplApiProcessImageDeleteCompletion(&IntCopyJob);
            goto Exit;
        }
    }
#if (TARGET_SYSTEM == _LINUX_) && defined(__KERNEL__)
    else
    {
        Ret = kEplApiPINonBlockingNotSupp;
        goto Exit;
    }
#endif

#if (TARGET_SYSTEM == _LINUX_) && defined(__KERNEL__)
#endif

    Ret = EplApiProcessImagePostCopyJob(&IntCopyJob);

    if (pCopyJob_p->m_fNonBlocking == FALSE)
    {
        if (Ret == kEplSuccessful)
        {
            Ret = EplApiProcessImageWaitForCompletion(&IntCopyJob);

            if ((Ret != kEplSuccessful)
                    || (EplApiProcessImageInstance_g.m_In.m_uiSize == 0)
                    || (EplApiProcessImageInstance_g.m_Out.m_uiSize == 0))
            {   // in the mean time the process image has been freed
                // therefore, indicate shutdown to application thread
                Ret = kEplShutdown;
            }
        }

        EplApiProcessImageDeleteCompletion(&IntCopyJob);
    }

Exit:
    return Ret;
}
Example #15
/* The main test function. */
int main( int argc, char * argv[] )
{
	int ret, i;
	pthread_t ch[ NTHREADS ];

	/* Initialize output */
	output_init();

	/* Set the signal mask */
	ret = sigemptyset( &setusr );

	if ( ret != 0 )
	{
		UNRESOLVED( ret, "Failed to empty signal set" );
	}

	ret = sigaddset( &setusr, SIGUSR1 );

	if ( ret != 0 )
	{
		UNRESOLVED( ret, "failed to add SIGUSR1 to signal set" );
	}

	ret = pthread_sigmask( SIG_BLOCK, &setusr, NULL );

	if ( ret != 0 )
	{
		UNRESOLVED( ret, "Failed to block SIGUSR1" );
	}

	/* Create the children */

	for ( i = 0; i < NTHREADS; i++ )
	{
		ret = pthread_create( &ch[ i ], NULL, threaded, NULL );

		if ( ret != 0 )
		{
			UNRESOLVED( ret, "Failed to create a thread" );
		}
	}

	/* raise the signal */
	ret = pthread_kill( ch[ 0 ], SIGUSR1 );

	if ( ret != 0 )
	{
		UNRESOLVED( ret, "Failed to raise the signal" );
	}

	sleep( 1 );

	if ( n_awaken != 1 )
	{
		output( "%d threads were awaken\n", n_awaken );
		FAILED( "Unexpected number of threads awaken" );
	}

	if ( !pthread_equal( last_awaken, ch[ 0 ] ) )
	{
		FAILED( "The awaken thread is not the signal target one." );
	}

	/* Wake other threads */
	for ( i = 1; i < NTHREADS ; i++ )
	{
		ret = pthread_kill( ch[ i ], SIGUSR1 );

		if ( ret != 0 )
		{
			UNRESOLVED( ret, "Failed to raise the signal" );
		}
	}

	/* Wait for child thread termination */
	for ( i = 0; i < NTHREADS; i++ )
	{
		ret = pthread_join( ch[ i ], NULL );

		if ( ret != 0 )
		{
			UNRESOLVED( ret, "Failed to join the thread" );
		}
	}

	/* Test passed */
#if VERBOSE > 0

	output( "Test passed\n" );

#endif

	PASSED;
}
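The test relies on a threaded() routine in which each child blocks in sigwait() on the set built above and records itself when woken. A minimal sketch of such a routine; the body is an assumption, only the globals (setusr, n_awaken, last_awaken) come from the test itself:

/* hypothetical child routine matching the checks in main(): wait for SIGUSR1 once, then return */
void * threaded( void * arg )
{
	int ret, sig;

	ret = sigwait( &setusr, &sig );  /* block until SIGUSR1 is delivered to this thread */

	if ( ret != 0 )
	{
		UNRESOLVED( ret, "sigwait failed" );
	}

	n_awaken++;                      /* count how many threads have been woken */
	last_awaken = pthread_self();    /* remember which thread received the signal */

	return NULL;
}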
Example #16
	bool Host::IsMainThread()
	{
		return pthread_equal(mainThread, pthread_self());
	}
Example #17
bool Thread::isCurrent() const
{
    return pthread_equal( pthread_self(), _id._data->pthread );
}
Example #18
int
pthread_cancel (pthread_t thread)
     /*
      * ------------------------------------------------------
      * DOCPUBLIC
      *      This function requests cancellation of 'thread'.
      *
      * PARAMETERS
      *      thread
      *              reference to an instance of pthread_t
      *
      *
      * DESCRIPTION
      *      This function requests cancellation of 'thread'.
      *      NOTE: cancellation is asynchronous; use pthread_join to
      *                wait for termination of 'thread' if necessary.
      *
      * RESULTS
      *              0               successfully requested cancellation,
      *              ESRCH           no thread found corresponding to 'thread',
      *              ENOMEM          implicit self thread create failed.
      * ------------------------------------------------------
      */
{
  int result;
  int cancel_self;
  pthread_t self;

  if (thread == NULL )
    {
      return ESRCH;
    }

  result = 0;
  if ((self = pthread_self()) == NULL)
    {
      return ENOMEM;
    };

  /*
   * FIXME!!
   *
   * Can a thread cancel itself?
   *
   * The standard doesn't
   * specify an error to be returned if the target
   * thread is itself.
   *
   * If it may, then we need to ensure that a thread can't
   * deadlock itself trying to cancel itself asynchronously
   * (pthread_cancel is required to be an async-cancel
   * safe function).
   */
  cancel_self = pthread_equal(thread, self);

  /*
   * Lock for async-cancel safety.
   */
  (void) pthread_mutex_lock(&thread->cancelLock);

  if (thread->cancelType == PTHREAD_CANCEL_ASYNCHRONOUS
      && thread->cancelState == PTHREAD_CANCEL_ENABLE
      && thread->state < PThreadStateCanceling )
    {
      if (cancel_self)
        {
          thread->state = PThreadStateCanceling;
          thread->cancelState = PTHREAD_CANCEL_DISABLE;

          (void) pthread_mutex_unlock(&thread->cancelLock);
          ptw32_throw(PTW32_EPS_CANCEL);

          /* Never reached */
        }
      else
        {
          HANDLE threadH = thread->threadH;

          SuspendThread(threadH);

          if (WaitForSingleObject(threadH, 0) == WAIT_TIMEOUT )
            {
              CONTEXT context;

              thread->state = PThreadStateCanceling;
              thread->cancelState = PTHREAD_CANCEL_DISABLE;
              context.ContextFlags = CONTEXT_CONTROL;
              GetThreadContext(threadH, &context);
              PTW32_PROGCTR(context) = (DWORD) ptw32_cancel_self;
              SetThreadContext(threadH, &context);
              (void) pthread_mutex_unlock(&thread->cancelLock);
              ResumeThread(threadH);
            }
        }
    }
  else
    {
      /*
       * Set for deferred cancellation.
       */
      if ( thread->state >= PThreadStateCanceling
           || !SetEvent (thread->cancelEvent))
        {
          result = ESRCH;
        }

      (void) pthread_mutex_unlock(&thread->cancelLock);
    }

  return (result);
}
Example #19
void EHSServer::HandleData ( int inTimeoutMilliseconds, ///< milliseconds for timeout on select
							 pthread_t inThreadId ///< numeric ID for this thread to help debug
	)
{

	//fprintf ( stderr, "##### [%d] Trying to lock server mutex\n", inThreadId );

	MUTEX_LOCK ( m_oMutex );

	//fprintf ( stderr, "##### [%d] Got lock on server mutex\n", inThreadId );

	// determine if there are any jobs waiting if this thread should --
	//   if we're running one-thread-per-request and this is the accept thread
	//   we don't look for requests
	HttpRequest * poHttpRequest = NULL;
	if ( m_nServerRunningStatus != SERVERRUNNING_ONETHREADPERREQUEST ||
		 !pthread_equal(inThreadId,m_nAcceptThreadId) ) {

		poHttpRequest = GetNextRequest ( );

	}

	// if we got a request to handle
	if ( poHttpRequest != NULL ) {

		//fprintf ( stderr, "##### [%d] Got a request to handle\n", inThreadId );

		// handle the request and post it back to the connection object
		MUTEX_UNLOCK ( m_oMutex );

		// route the request
		HttpResponse * poHttpResponse = 
			m_poTopLevelEHS->RouteRequest ( poHttpRequest );

		// add the response to the appropriate connection's response list
		poHttpResponse->m_poEHSConnection->AddResponse ( poHttpResponse );

		delete poHttpRequest;

	} 
	// otherwise, no requests are pending
	else {

		// if something is already accepting, sleep
		if ( m_nAccepting ) {
			
			// wait until something happens
			// it's ok to not recheck our condition here, as we'll come back in the same way and recheck then
			//fprintf ( stderr, "##### [%d] Sleeping because no requests and someone else is accepting\n", inThreadId );

			pthread_cond_wait ( & m_oDoneAccepting,
								& m_oMutex );			

			MUTEX_UNLOCK ( m_oMutex );

		} 
		// if no one is accepting, we accept
		else {

			m_nAcceptedNewConnection = 0;
	
            
			
			//fprintf ( stderr, "Accepting\n" );

			// we're now accepting
			m_nAccepting = 1;

			MUTEX_UNLOCK ( m_oMutex );

			// create the FD set for poll
			CreateFdSet();
			int nSocketCount = poll( m_oReadFds.GetFdArray(), m_oReadFds.GetFdCount(), inTimeoutMilliseconds );

			// handle select error
#ifdef _WIN32
			if ( nSocketCount == SOCKET_ERROR )
#else // NOT _WIN32
			if ( nSocketCount == -1 )
#endif // _WIN32
			{

				EHS_TRACE ( "[%d] Critical Error: select() failed.  Aborting\n", inThreadId );

				// Idea! Remove stupid idea
				//exit ( 0 );
			}
			
			
			
			// if no sockets have data to read, clear accepting flag and return
			if ( nSocketCount > 0 ) {

				// Check the accept socket for a new connection
				CheckAcceptSocket ( );

				// check client sockets for data
				CheckClientSockets ( );

			}

			MUTEX_LOCK ( m_oMutex );
			ClearIdleConnections ( );
			m_nAccepting = 0;
			MUTEX_UNLOCK ( m_oMutex );

            // Occasional pulse for updating of things
            m_poTopLevelEHS->HttpPulse ();

		} // END ACCEPTING
		
	} // END NO REQUESTS PENDING


    MUTEX_LOCK ( m_oMutex );

    // Delete unused connections after all threads have been synced
    EHSConnectionList :: iterator iter = m_oEHSConnectionUnusedList.begin ();
    while ( iter != m_oEHSConnectionUnusedList.end () )
    {
        EHSConnection* pConnection = *iter;
        // Delete it after all threads are past syncing point
        if ( pConnection->m_UnusedSyncId < m_ThreadsSyncPoint.GetSyncId () - 1 )
        {
            iter = m_oEHSConnectionUnusedList.erase ( iter );
            delete pConnection;
        }
        else
            iter++;
    }

    MUTEX_UNLOCK ( m_oMutex );

}
Example #20
gboolean
mono_native_thread_id_equals (MonoNativeThreadId id1, MonoNativeThreadId id2)
{
	return pthread_equal (id1, id2);
}
Example #21
inline bool equal_thread_id(OS_thread_id_t id1, OS_thread_id_t id2)
{  return 0 != pthread_equal(id1, id2);  }
Example #22
static int pthreadMutexHeld(sqlite3_mutex *p){
  return (p->nRef!=0 && pthread_equal(p->owner, pthread_self()));
}
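Helpers like this are normally consumed only inside assertions, so they compile away in release builds. A minimal sketch of typical use:

/* sketch: the predicate backs a sanity assertion that the caller already owns the mutex */
static void incrementUnderMutex(sqlite3_mutex *p, int *pCounter){
  assert( pthreadMutexHeld(p) );
  (*pCounter)++;
}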
Example #23
// looks up the cache entry for the given key. if it is not found, a new entry is
// allocated (using the cache's allocate callback when one is set) and inserted into
// the hash table, so later lookups with the same key will find it.
dt_cache_entry_t *dt_cache_get_with_caller(dt_cache_t *cache, const uint32_t key, char mode, const char *file, int line)
{
  gpointer orig_key, value;
  gboolean res;
  int result;
  double start = dt_get_wtime();
restart:
  dt_pthread_mutex_lock(&cache->lock);
  res = g_hash_table_lookup_extended(
      cache->hashtable, GINT_TO_POINTER(key), &orig_key, &value);
  if(res)
  { // yay, found. read lock and pass on.
    dt_cache_entry_t *entry = (dt_cache_entry_t *)value;
    if(mode == 'w') result = dt_pthread_rwlock_trywrlock_with_caller(&entry->lock, file, line);
    else            result = dt_pthread_rwlock_tryrdlock_with_caller(&entry->lock, file, line);
    if(result)
    { // need to give up mutex so other threads have a chance to get in between and
      // free the lock we're trying to acquire:
      dt_pthread_mutex_unlock(&cache->lock);
      g_usleep(5);
      goto restart;
    }
    // bubble up in lru list:
    cache->lru = g_list_remove_link(cache->lru, entry->link);
    cache->lru = g_list_concat(cache->lru, entry->link);
    dt_pthread_mutex_unlock(&cache->lock);

#ifdef _DEBUG
    const pthread_t writer = dt_pthread_rwlock_get_writer(&entry->lock);
    if(mode == 'w')
    {
      assert(pthread_equal(writer, pthread_self()));
    }
    else
    {
      assert(!pthread_equal(writer, pthread_self()));
    }
#endif

    if(mode == 'w')
    {
      assert(entry->data_size);
      ASAN_POISON_MEMORY_REGION(entry->data, entry->data_size);
    }

    // WARNING: do *NOT* unpoison here. it must be done by the caller!

    return entry;
  }

  // else, not found, need to allocate.

  // first try to clean up.
  // also wait if we can't free more than the requested fill ratio.
  if(cache->cost > 0.8f * cache->cost_quota)
  {
    // need to roll back all the way to get a consistent lock state:
    dt_cache_gc(cache, 0.8f);
  }

  // here dies your 32-bit system:
  dt_cache_entry_t *entry = (dt_cache_entry_t *)g_slice_alloc(sizeof(dt_cache_entry_t));
  int ret = dt_pthread_rwlock_init(&entry->lock, 0);
  if(ret) fprintf(stderr, "rwlock init: %d\n", ret);
  entry->data = 0;
  entry->data_size = cache->entry_size;
  entry->cost = 1;
  entry->link = g_list_append(0, entry);
  entry->key = key;
  entry->_lock_demoting = 0;

  g_hash_table_insert(cache->hashtable, GINT_TO_POINTER(key), entry);

  assert(cache->allocate || entry->data_size);

  if(cache->allocate)
    cache->allocate(cache->allocate_data, entry);
  else
    entry->data = dt_alloc_align(16, entry->data_size);

  assert(entry->data_size);
  ASAN_POISON_MEMORY_REGION(entry->data, entry->data_size);

  // if allocate callback is given, always return a write lock
  const int write = ((mode == 'w') || cache->allocate);

  // write lock in case the caller requests it:
  if(write) dt_pthread_rwlock_wrlock_with_caller(&entry->lock, file, line);
  else      dt_pthread_rwlock_rdlock_with_caller(&entry->lock, file, line);

  cache->cost += entry->cost;

  // put at end of lru list (most recently used):
  cache->lru = g_list_concat(cache->lru, entry->link);

  dt_pthread_mutex_unlock(&cache->lock);
  double end = dt_get_wtime();
  if(end - start > 0.1)
    fprintf(stderr, "wait time %.06fs\n", end - start);

  // WARNING: do *NOT* unpoison here. it must be done by the caller!

  return entry;
}
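Callers hold the returned entry only while they use it and must then drop its lock. A minimal read-path sketch, assuming a dt_cache_release() counterpart and an ASAN_UNPOISON_MEMORY_REGION macro mirroring the poison call above:

/* hypothetical read path: take a read lock, unpoison (per the warning above), use, release */
static void read_entry(dt_cache_t *cache, const uint32_t key)
{
  dt_cache_entry_t *entry = dt_cache_get_with_caller(cache, key, 'r', __FILE__, __LINE__);
  ASAN_UNPOISON_MEMORY_REGION(entry->data, entry->data_size);

  /* ... read from entry->data ... */

  dt_cache_release(cache, entry); /* assumed counterpart that drops entry->lock */
}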
Example #24
static int pthreadMutexNotheld(sqlite3_mutex *p){
  return p->nRef==0 || pthread_equal(p->owner, pthread_self())==0;
}
Example #25
int in_async(void)
{
	if (!main_thread_set)
		return 0; /* no asyncs started yet */
	return !pthread_equal(main_thread, pthread_self());
}
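This predicate lets code refuse (or defer) work that is only legal on the main thread. A minimal usage sketch; die() stands in for whatever error helper the surrounding project provides:

/* sketch: guard an operation that must only run on the main thread */
static void flush_global_state(void)
{
	if (in_async())
		die("BUG: flush_global_state() called from an async thread");

	/* ... safe to touch main-thread-only state here ... */
}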
Example #26
int adaptor_init(zhandle_t *zh)
{
    pthread_mutexattr_t recursive_mx_attr;
    struct adaptor_threads *adaptor_threads = calloc(1, sizeof(*adaptor_threads));
    if (!adaptor_threads) {
        LOG_ERROR(("Out of memory"));
        return -1;
    }

    /* We use a pipe for interrupting select() in unix/sol and socketpair in windows. */
#ifdef WIN32   
    if (create_socket_pair(adaptor_threads->self_pipe) == -1){
       LOG_ERROR(("Can't make a socket."));
#else
    if(pipe(adaptor_threads->self_pipe)==-1) {
        LOG_ERROR(("Can't make a pipe %d",errno));
#endif
        free(adaptor_threads);
        return -1;
    }
    set_nonblock(adaptor_threads->self_pipe[1]);
    set_nonblock(adaptor_threads->self_pipe[0]);

    pthread_mutex_init(&zh->auth_h.lock,0);

    zh->adaptor_priv = adaptor_threads;
    pthread_mutex_init(&zh->to_process.lock,0);
    pthread_mutex_init(&adaptor_threads->zh_lock,0);
    pthread_mutex_init(&adaptor_threads->reconfig_lock,0);
    // to_send must be recursive mutex    
    pthread_mutexattr_init(&recursive_mx_attr);
    pthread_mutexattr_settype(&recursive_mx_attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&zh->to_send.lock,&recursive_mx_attr);
    pthread_mutexattr_destroy(&recursive_mx_attr);
    
    pthread_mutex_init(&zh->sent_requests.lock,0);
    pthread_cond_init(&zh->sent_requests.cond,0);
    pthread_mutex_init(&zh->completions_to_process.lock,0);
    pthread_cond_init(&zh->completions_to_process.cond,0);
    start_threads(zh);
    return 0;
}

void adaptor_finish(zhandle_t *zh)
{
    struct adaptor_threads *adaptor_threads;
    // make sure zh doesn't get destroyed until after we're done here
    api_prolog(zh); 
    adaptor_threads = zh->adaptor_priv;
    if(adaptor_threads==0) {
        api_epilog(zh,0);
        return;
    }

    if(!pthread_equal(adaptor_threads->io,pthread_self())){
        wakeup_io_thread(zh);
        pthread_join(adaptor_threads->io, 0);
    }else
        pthread_detach(adaptor_threads->io);
    
    if(!pthread_equal(adaptor_threads->completion,pthread_self())){
        pthread_mutex_lock(&zh->completions_to_process.lock);
        pthread_cond_broadcast(&zh->completions_to_process.cond);
        pthread_mutex_unlock(&zh->completions_to_process.lock);
        pthread_join(adaptor_threads->completion, 0);
    }else
        pthread_detach(adaptor_threads->completion);
    
    api_epilog(zh,0);
}

void adaptor_destroy(zhandle_t *zh)
{
    struct adaptor_threads *adaptor = zh->adaptor_priv;
    if(adaptor==0) return;
    
    pthread_cond_destroy(&adaptor->cond);
    pthread_mutex_destroy(&adaptor->lock);
    pthread_mutex_destroy(&zh->to_process.lock);
    pthread_mutex_destroy(&zh->to_send.lock);
    pthread_mutex_destroy(&zh->sent_requests.lock);
    pthread_cond_destroy(&zh->sent_requests.cond);
    pthread_mutex_destroy(&zh->completions_to_process.lock);
    pthread_cond_destroy(&zh->completions_to_process.cond);
    pthread_mutex_destroy(&adaptor->zh_lock);

    pthread_mutex_destroy(&zh->auth_h.lock);

    close(adaptor->self_pipe[0]);
    close(adaptor->self_pipe[1]);
    free(adaptor);
    zh->adaptor_priv=0;
}

int wakeup_io_thread(zhandle_t *zh)
{
    struct adaptor_threads *adaptor_threads = zh->adaptor_priv;
    char c=0;
#ifndef WIN32
    return write(adaptor_threads->self_pipe[1],&c,1)==1? ZOK: ZSYSTEMERROR;    
#else
    return send(adaptor_threads->self_pipe[1], &c, 1, 0)==1? ZOK: ZSYSTEMERROR;    
#endif         
}

int adaptor_send_queue(zhandle_t *zh, int timeout)
{
    if(!zh->close_requested)
        return wakeup_io_thread(zh);
    // don't rely on the IO thread to send the messages if the app has
    // requested to close 
    return flush_send_queue(zh, timeout);
}

/* These two are declared here because we will run the event loop
 * and not the client */
#ifdef WIN32
int zookeeper_interest(zhandle_t *zh, SOCKET *fd, int *interest,
        struct timeval *tv);
#else
int zookeeper_interest(zhandle_t *zh, int *fd, int *interest,
        struct timeval *tv);
#endif
int zookeeper_process(zhandle_t *zh, int events);

#ifdef WIN32
unsigned __stdcall do_io( void * v)
#else
void *do_io(void *v)
#endif
{
    zhandle_t *zh = (zhandle_t*)v;
#ifndef WIN32
    struct pollfd fds[2];
    struct adaptor_threads *adaptor_threads = zh->adaptor_priv;

    api_prolog(zh);
    notify_thread_ready(zh);
    LOG_DEBUG(("started IO thread"));
    fds[0].fd=adaptor_threads->self_pipe[0];
    fds[0].events=POLLIN;
    while(!zh->close_requested) {
        struct timeval tv;
        int fd;
        int interest;
        int timeout;
        int maxfd=1;
        int rc;
        
        zookeeper_interest(zh, &fd, &interest, &tv);
        if (fd != -1) {
            fds[1].fd=fd;
            fds[1].events=(interest&ZOOKEEPER_READ)?POLLIN:0;
            fds[1].events|=(interest&ZOOKEEPER_WRITE)?POLLOUT:0;
            maxfd=2;
        }
        timeout=tv.tv_sec * 1000 + (tv.tv_usec/1000);
        
        poll(fds,maxfd,timeout);
        if (fd != -1) {
            interest=(fds[1].revents&POLLIN)?ZOOKEEPER_READ:0;
            interest|=((fds[1].revents&POLLOUT)||(fds[1].revents&POLLHUP))?ZOOKEEPER_WRITE:0;
        }
        if(fds[0].revents&POLLIN){
            // flush the pipe
            char b[128];
            while(read(adaptor_threads->self_pipe[0],b,sizeof(b))==sizeof(b)){}
        }        
#else
    fd_set rfds, wfds, efds;
    struct adaptor_threads *adaptor_threads = zh->adaptor_priv;
    api_prolog(zh);
    notify_thread_ready(zh);
    LOG_DEBUG(("started IO thread"));
    FD_ZERO(&rfds);   FD_ZERO(&wfds);    FD_ZERO(&efds);
    while(!zh->close_requested) {      
        struct timeval tv;
        SOCKET fd;
        SOCKET maxfd=adaptor_threads->self_pipe[0];
        int interest;        
        int rc;
               
       zookeeper_interest(zh, &fd, &interest, &tv);
       if (fd != -1) {
           if (interest&ZOOKEEPER_READ) {
                FD_SET(fd, &rfds);
            } else {
                FD_CLR(fd, &rfds);
            }
           if (interest&ZOOKEEPER_WRITE) {
                FD_SET(fd, &wfds);
            } else {
                FD_CLR(fd, &wfds);
            }                  
        }
       FD_SET( adaptor_threads->self_pipe[0] ,&rfds );        
       rc = select((int)maxfd, &rfds, &wfds, &efds, &tv);
       if (fd != -1) 
       {
           interest = (FD_ISSET(fd, &rfds))? ZOOKEEPER_READ:0;
           interest|= (FD_ISSET(fd, &wfds))? ZOOKEEPER_WRITE:0;
        }
               
       if (FD_ISSET(adaptor_threads->self_pipe[0], &rfds)){
            // flush the pipe/socket
            char b[128];
           while(recv(adaptor_threads->self_pipe[0],b,sizeof(b), 0)==sizeof(b)){}
       }
#endif
        // dispatch zookeeper events
        rc = zookeeper_process(zh, interest);
        // check the current state of the zhandle and terminate 
        // if it is_unrecoverable()
        if(is_unrecoverable(zh))
            break;
    }
    api_epilog(zh, 0);    
    LOG_DEBUG(("IO thread terminated"));
    return 0;
}

#ifdef WIN32
unsigned __stdcall do_completion( void * v)
#else
void *do_completion(void *v)
#endif
{
    zhandle_t *zh = v;
    api_prolog(zh);
    notify_thread_ready(zh);
    LOG_DEBUG(("started completion thread"));
    while(!zh->close_requested) {
        pthread_mutex_lock(&zh->completions_to_process.lock);
        while(!zh->completions_to_process.head && !zh->close_requested) {
            pthread_cond_wait(&zh->completions_to_process.cond, &zh->completions_to_process.lock);
        }
        pthread_mutex_unlock(&zh->completions_to_process.lock);
        process_completions(zh);
    }
    api_epilog(zh, 0);    
    LOG_DEBUG(("completion thread terminated"));
    return 0;
}
Example #27
bool	CAMutex::Try(bool& outWasLocked)
{
	bool theAnswer = false;
	outWasLocked = false;

#if TARGET_OS_MAC
	pthread_t theCurrentThread = pthread_self();
	if(!pthread_equal(theCurrentThread, mOwner))
	{
		//	this means the current thread doesn't already own the lock
		#if	Log_Ownership
			DebugPrintfRtn(DebugPrintfFileComma "%p %.4f: CAMutex::Try: thread %p is try-locking %s, owner: %p\n", theCurrentThread, ((Float64)(CAHostTimeBase::GetCurrentTimeInNanos()) / 1000000.0), theCurrentThread, mName, mOwner);
		#endif

		//	go ahead and call trylock to see if we can lock it.
		int theError = pthread_mutex_trylock(&mMutex);
		if(theError == 0)
		{
			//	return value of 0 means we successfully locked the lock
			mOwner = theCurrentThread;
			theAnswer = true;
			outWasLocked = true;
	
			#if	Log_Ownership
				DebugPrintfRtn(DebugPrintfFileComma "%p %.4f: CAMutex::Try: thread %p has locked %s, owner: %p\n", theCurrentThread, ((Float64)(CAHostTimeBase::GetCurrentTimeInNanos()) / 1000000.0), theCurrentThread, mName, mOwner);
			#endif
		}
		else if(theError == EBUSY)
		{
			//	return value of EBUSY means that the lock was already locked by another thread
			theAnswer = false;
			outWasLocked = false;
	
			#if	Log_Ownership
				DebugPrintfRtn(DebugPrintfFileComma "%p %.4f: CAMutex::Try: thread %p failed to lock %s, owner: %p\n", theCurrentThread, ((Float64)(CAHostTimeBase::GetCurrentTimeInNanos()) / 1000000.0), theCurrentThread, mName, mOwner);
			#endif
		}
		else
		{
			//	any other return value means something really bad happened
			ThrowIfError(theError, CAException(theError), "CAMutex::Try: call to pthread_mutex_trylock failed");
		}
	}
	else
	{
		//	this means the current thread already owns the lock
		theAnswer = true;
		outWasLocked = false;
	}
#elif TARGET_OS_WIN32
	if(mOwner != GetCurrentThreadId())
	{
		//	this means the current thread doesn't own the lock
		#if	Log_Ownership
			DebugPrintfRtn(DebugPrintfFileComma "%lu %.4f: CAMutex::Try: thread %lu is try-locking %s, owner: %lu\n", GetCurrentThreadId(), ((Float64)(CAHostTimeBase::GetCurrentTimeInNanos()) / 1000000.0), GetCurrentThreadId(), mName, mOwner);
		#endif
		
		//	try to acquire the mutex
		OSStatus theError = WaitForSingleObject(mMutex, 0);
		if(theError == WAIT_OBJECT_0)
		{
			//	this means we successfully locked the lock
			mOwner = GetCurrentThreadId();
			theAnswer = true;
			outWasLocked = true;
	
			#if	Log_Ownership
				DebugPrintfRtn(DebugPrintfFileComma "%lu %.4f: CAMutex::Try: thread %lu has locked %s, owner: %lu\n", GetCurrentThreadId(), ((Float64)(CAHostTimeBase::GetCurrentTimeInNanos()) / 1000000.0), GetCurrentThreadId(), mName, mOwner);
			#endif
		}
		else if(theError == WAIT_TIMEOUT)
		{
			//	this means that the lock was already locked by another thread
			theAnswer = false;
			outWasLocked = false;
	
			#if	Log_Ownership
				DebugPrintfRtn(DebugPrintfFileComma "%lu %.4f: CAMutex::Try: thread %lu failed to lock %s, owner: %lu\n", GetCurrentThreadId(), ((Float64)(CAHostTimeBase::GetCurrentTimeInNanos()) / 1000000.0), GetCurrentThreadId(), mName, mOwner);
			#endif
		}
		else
		{
			//	any other return value means something really bad happened
			ThrowIfError(theError, CAException(GetLastError()), "CAMutex::Try: call to lock the mutex failed");
		}
	}
	else
	{
		//	this means the current thread already owns the lock
		theAnswer = true;
		outWasLocked = false;
	}
#endif
	
	return theAnswer;
}
Example #28
gboolean dt_control_gdk_haslock()
{
  if(pthread_equal(darktable.control->gui_thread, pthread_self()) != 0) return TRUE;
  return _control_gdk_lock_mine;
}
Example #29
static int main_thread_self(void) { return pthread_equal(pthread_self(), main_thread); }
Example #30
int
pthread_mutex_unlock (pthread_mutex_t * mutex)
{
  int result = 0;
  int kind;
  pthread_mutex_t mx;

  /*
   * Let the system deal with invalid pointers.
   */

  mx = *mutex;

  /*
   * If the thread calling us holds the mutex then there is no
   * race condition. If another thread holds the
   * lock then we shouldn't be in here.
   */
  if (mx < PTHREAD_ERRORCHECK_MUTEX_INITIALIZER)
    {
      kind = mx->kind;

      if (kind >= 0)
        {
          if (kind == PTHREAD_MUTEX_NORMAL)
	    {
	      LONG idx;

	      idx = (LONG) PTW32_INTERLOCKED_EXCHANGE_LONG ((PTW32_INTERLOCKED_LONGPTR)&mx->lock_idx,
							    (PTW32_INTERLOCKED_LONG)0);
	      if (idx != 0)
	        {
	          if (idx < 0)
		    {
		      /*
		       * Someone may be waiting on that mutex.
		       */
		      if (SetEvent (mx->event) == 0)
		        {
		          result = EINVAL;
		        }
		    }
	        }
	    }
          else
	    {
	      if (pthread_equal (to_pthread(mx->ownerThread), pthread_self()))
	        {
	          if (kind != PTHREAD_MUTEX_RECURSIVE
		      || 0 == --mx->recursive_count)
		    {
		      mx->ownerThread.p = NULL;

		      if ((LONG) PTW32_INTERLOCKED_EXCHANGE_LONG ((PTW32_INTERLOCKED_LONGPTR)&mx->lock_idx,
							          (PTW32_INTERLOCKED_LONG)0) < 0L)
		        {
		          /* Someone may be waiting on that mutex */
		          if (SetEvent (mx->event) == 0)
			    {
			      result = EINVAL;
			    }
		        }
		    }
	        }
	      else
	        {
	          result = EPERM;
	        }
	    }
        }
      else
        {
          /* Robust types */
          pthread_t self = pthread_self();
          kind = -kind - 1; /* Convert to non-robust range */

          /*
           * The thread must own the lock regardless of type if the mutex
           * is robust.
           */
          if (pthread_equal (to_pthread(mx->ownerThread), self))
            {
              PTW32_INTERLOCKED_COMPARE_EXCHANGE_LONG((PTW32_INTERLOCKED_LONGPTR) &mx->robustNode->stateInconsistent,
                                                      (PTW32_INTERLOCKED_LONG)PTW32_ROBUST_NOTRECOVERABLE,
                                                      (PTW32_INTERLOCKED_LONG)PTW32_ROBUST_INCONSISTENT);
              if (PTHREAD_MUTEX_NORMAL == kind)
                {
                  ptw32_robust_mutex_remove(mutex, NULL);

                  if ((LONG) PTW32_INTERLOCKED_EXCHANGE_LONG((PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
                                                             (PTW32_INTERLOCKED_LONG) 0) < 0)
                    {
                      /*
                       * Someone may be waiting on that mutex.
                       */
                      if (SetEvent (mx->event) == 0)
                        {
                          result = EINVAL;
                        }
                    }
                }
              else
                {
                  if (kind != PTHREAD_MUTEX_RECURSIVE
                      || 0 == --mx->recursive_count)
                    {
                      ptw32_robust_mutex_remove(mutex, NULL);

                      if ((LONG) PTW32_INTERLOCKED_EXCHANGE_LONG((PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
                                                                 (PTW32_INTERLOCKED_LONG) 0) < 0)
                        {
                          /*
                           * Someone may be waiting on that mutex.
                           */
                          if (SetEvent (mx->event) == 0)
                            {
                              result = EINVAL;
                            }
                        }
                    }
                }
            }
          else
            {
              result = EPERM;
            }
        }
    }
  else if (mx != PTHREAD_MUTEX_INITIALIZER)
    {
      result = EINVAL;
    }

  return (result);
}