Example #1
/**
 * Waits for a condition variable up to a certain date.
 * This works like vlc_cond_wait(), except for the additional time-out.
 *
 * If the variable was initialized with vlc_cond_init(), the timeout has the
 * same arbitrary origin as mdate(). If the variable was initialized with
 * vlc_cond_init_daytime(), the timeout is expressed from the Unix epoch.
 *
 * @param p_condvar condition variable to wait on
 * @param p_mutex mutex which is unlocked while waiting,
 *                then locked again when waking up.
 * @param deadline <b>absolute</b> timeout
 *
 * @return 0 if the condition was signaled, an error code in case of timeout.
 */
int vlc_cond_timedwait (vlc_cond_t *p_condvar, vlc_mutex_t *p_mutex,
                        mtime_t deadline)
{
#if defined(__APPLE__) && !defined(__powerpc__) && !defined( __ppc__ ) && !defined( __ppc64__ )
    /* mdate() is the monotonic clock, but the timedwait origin is
     * gettimeofday(), which isn't monotonic. Use
     * pthread_cond_timedwait_relative_np() instead. */
    mtime_t base = mdate();
    deadline -= base;
    if (deadline < 0)
        deadline = 0;
    lldiv_t d = lldiv( deadline, CLOCK_FREQ );
    struct timespec ts = { d.quot, d.rem * (1000000000 / CLOCK_FREQ) };

    int val = pthread_cond_timedwait_relative_np(p_condvar, p_mutex, &ts);
    if (val != ETIMEDOUT)
        VLC_THREAD_ASSERT ("timed-waiting on condition");
    return val;
#else
    lldiv_t d = lldiv( deadline, CLOCK_FREQ );
    struct timespec ts = { d.quot, d.rem * (1000000000 / CLOCK_FREQ) };
    int val = pthread_cond_timedwait (p_condvar, p_mutex, &ts);
    if (val != ETIMEDOUT)
        VLC_THREAD_ASSERT ("timed-waiting on condition");
    return val;
#endif
}
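A usage sketch for the function above (not part of VLC): because the deadline is absolute, a caller adds a relative delay to mdate() and re-checks its predicate in a loop to cope with spurious wakeups. This assumes the condition variable was created with vlc_cond_init(), so the deadline shares mdate()'s origin, and that CLOCK_FREQ is the tick rate of mtime_t.

/* Hypothetical caller: wait up to half a second for `*ready` to become true. */
static int wait_for_ready(vlc_cond_t *cond, vlc_mutex_t *lock, bool *ready)
{
    mtime_t deadline = mdate() + CLOCK_FREQ / 2; /* now + 0.5 s, mdate() origin */
    int ret = 0;

    vlc_mutex_lock(lock);
    while (!*ready && ret == 0)
        ret = vlc_cond_timedwait(cond, lock, deadline);
    vlc_mutex_unlock(lock);

    return ret; /* 0 if signalled, ETIMEDOUT on timeout */
}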
Example #2
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
    int r;
    struct timespec ts;

#if defined(__APPLE__) && defined(__MACH__)
    ts.tv_sec = timeout / NANOSEC;
    ts.tv_nsec = timeout % NANOSEC;
    r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
    timeout += uv__hrtime();
    ts.tv_sec = timeout / NANOSEC;
    ts.tv_nsec = timeout % NANOSEC;
#if defined(__ANDROID__) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC)
    r = pthread_cond_timedwait_monotonic_np(cond, mutex, &ts);
#else
    r = pthread_cond_timedwait(cond, mutex, &ts);
#endif /* __ANDROID__ */
#endif

    if (r == 0) return 0;

    if (r == ETIMEDOUT) return -1;

    JXABORT("19");
    return -1; /* Satisfy the compiler. */
}
Example #3
void uv_cond_destroy(uv_cond_t* cond) {
#if defined(__APPLE__) && defined(__MACH__)
  /* It has been reported that destroying condition variables that have been
   * signalled but not waited on can sometimes result in application crashes.
   * See https://codereview.chromium.org/1323293005.
   */
  pthread_mutex_t mutex;
  struct timespec ts;
  int err;

  if (pthread_mutex_init(&mutex, NULL))
    abort();

  if (pthread_mutex_lock(&mutex))
    abort();

  ts.tv_sec = 0;
  ts.tv_nsec = 1;

  err = pthread_cond_timedwait_relative_np(cond, &mutex, &ts);
  if (err != 0 && err != ETIMEDOUT)
    abort();

  if (pthread_mutex_unlock(&mutex))
    abort();

  if (pthread_mutex_destroy(&mutex))
    abort();
#endif /* defined(__APPLE__) && defined(__MACH__) */

  if (pthread_cond_destroy(cond))
    abort();
}
Example #4
int tds_raw_cond_timedwait(tds_condition *cond, tds_raw_mutex *mtx, int timeout_sec)
{
	struct timespec ts;
#if !defined(HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP) && !defined(USE_CLOCK_IN_COND)
	struct timeval tv;
#endif

	if (timeout_sec < 0)
		return tds_raw_cond_wait(cond, mtx);

#if defined(HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP)
	ts.tv_sec = timeout_sec;
	ts.tv_nsec = 0;
	return pthread_cond_timedwait_relative_np(cond, mtx, &ts);
#else

#  ifdef USE_CLOCK_IN_COND
#    if defined(USE_MONOTONIC_CLOCK_IN_COND)
	clock_gettime(CLOCK_MONOTONIC, &ts);
#    else
	clock_gettime(CLOCK_REALTIME, &ts);
#    endif
#  elif defined(HAVE_GETTIMEOFDAY)
	gettimeofday(&tv, NULL);
	ts.tv_sec = tv.tv_sec;
	ts.tv_nsec = tv.tv_usec * 1000u;
#  else
#  error No way to get a proper time!
#  endif

	ts.tv_sec += timeout_sec;
	return pthread_cond_timedwait(cond, mtx, &ts);
#endif
}
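In the non-Darwin branch above, the deadline is taken from CLOCK_MONOTONIC when USE_MONOTONIC_CLOCK_IN_COND is defined; that only behaves correctly if the condition variable was created with a matching clock. A minimal sketch of such an initialization using the standard pthread API (an illustration, not FreeTDS's actual tds_raw_cond_init; note that pthread_condattr_setclock() is unavailable on Darwin, which is why the relative variant is used there):

#include <pthread.h>

/* Create a condition variable whose pthread_cond_timedwait() deadlines are
 * interpreted on CLOCK_MONOTONIC rather than the default CLOCK_REALTIME. */
static int cond_init_monotonic(pthread_cond_t *cond)
{
	pthread_condattr_t attr;
	int ret = pthread_condattr_init(&attr);

	if (ret != 0)
		return ret;
	ret = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
	if (ret == 0)
		ret = pthread_cond_init(cond, &attr);
	pthread_condattr_destroy(&attr);
	return ret;
}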
Example #5
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
  int r;
  struct timespec ts;

#if defined(__APPLE__) && defined(__MACH__)
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
  timeout += uv__hrtime();
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait(cond, mutex, &ts);
#endif


  if (r == 0)
    return 0;

  if (r == ETIMEDOUT)
    return -1;

  abort();
  return -1; /* Satisfy the compiler. */
}
Example #6
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
  int r;
  struct timespec ts;

#if defined(__APPLE__) && defined(__MACH__)
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
  timeout += uv__hrtime(UV_CLOCK_PRECISE);
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
#if defined(__ANDROID__) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC)
  /*
   * The bionic pthread implementation doesn't support CLOCK_MONOTONIC,
   * but has this alternative function instead.
   */
  r = pthread_cond_timedwait_monotonic_np(cond, mutex, &ts);
#else
  r = pthread_cond_timedwait(cond, mutex, &ts);
#endif /* __ANDROID__ */
#endif


  if (r == 0)
    return 0;

  if (r == ETIMEDOUT)
    return -ETIMEDOUT;

  abort();
  return -EINVAL;  /* Satisfy the compiler. */
}
Example #7
	bool Semaphore::wait(int32_t _msecs)
	{
		int result = pthread_mutex_lock(&m_mutex);
		BX_CHECK(0 == result, "pthread_mutex_lock %d", result);

#		if BX_PLATFORM_NACL || BX_PLATFORM_OSX
		BX_UNUSED(_msecs);
		BX_CHECK(-1 == _msecs, "NaCl and OSX don't support pthread_cond_timedwait at this moment.");
		while (0 == result
		&&	 0 >= m_count)
		{
			result = pthread_cond_wait(&m_cond, &m_mutex);
		}
#		elif BX_PLATFORM_IOS
		if (-1 == _msecs)
		{
			while (0 == result
			&&     0 >= m_count)
			{
				result = pthread_cond_wait(&m_cond, &m_mutex);
			}
		}
		else
		{
			timespec ts;
			ts.tv_sec = _msecs/1000;
			ts.tv_nsec = (_msecs%1000)*1000000; // milliseconds -> nanoseconds

			while (0 == result
			&&     0 >= m_count)
			{
				result = pthread_cond_timedwait_relative_np(&m_cond, &m_mutex, &ts);
			}
		}
#		else
		timespec ts;
		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += _msecs/1000;
		ts.tv_nsec += (_msecs%1000)*1000000; // milliseconds -> nanoseconds
		if (ts.tv_nsec >= 1000000000)
		{
			ts.tv_sec++;
			ts.tv_nsec -= 1000000000;
		}

		while (0 == result
		&&     0 >= m_count)
		{
			result = pthread_cond_timedwait(&m_cond, &m_mutex, &ts);
		}
#		endif // BX_PLATFORM_NACL || BX_PLATFORM_OSX
		bool ok = 0 == result;

		if (ok)
		{
			--m_count;
		}

		result = pthread_mutex_unlock(&m_mutex);
		BX_CHECK(0 == result, "pthread_mutex_unlock %d", result);

		BX_UNUSED(result);

		return ok;
	}
Example #8
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
  int r;
  struct timespec ts;
#if defined(__MVS__)
  struct timeval tv;
#endif

#if defined(__APPLE__) && defined(__MACH__)
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
#if defined(__MVS__)
  if (gettimeofday(&tv, NULL))
    abort();
  timeout += tv.tv_sec * NANOSEC + tv.tv_usec * 1e3;
#else
  timeout += uv__hrtime(UV_CLOCK_PRECISE);
#endif
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
#if defined(__ANDROID_API__) && __ANDROID_API__ < 21

  /*
   * The bionic pthread implementation doesn't support CLOCK_MONOTONIC,
   * but has this alternative function instead.
   */
  r = pthread_cond_timedwait_monotonic_np(cond, mutex, &ts);
#else
  r = pthread_cond_timedwait(cond, mutex, &ts);
#endif /* __ANDROID_API__ */
#endif


  if (r == 0)
    return 0;

  if (r == ETIMEDOUT)
    return UV_ETIMEDOUT;

  abort();
  return UV_EINVAL;  /* Satisfy the compiler. */
}
Example #9
/// Connect all main threads and then wait until they finish. Return success if completed.
static int
supRunOnce (SupMain *mains) {
    int success = 0;
    struct timespec timeout = { 30, 0 };

    for (size_t index = 0; index < SUP_MAIN_COUNT; index++)
        supMainConnect (mains[index]);

    // We wait for supAssertHandler to signal that we've handled a worker BRFail.

    pthread_mutex_lock (&done_lock);
    switch (pthread_cond_timedwait_relative_np (&done_cond, &done_lock, &timeout)) {
        case ETIMEDOUT: break;
        default: success = 1; break;
    }
    pthread_mutex_unlock (&done_lock);

    return success && supConfirmComplete(mains);
}
Example #10
/*-----------------------------------------------------------------------------------*/
static u32_t
cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex, u32_t timeout)
{
  struct timespec rtime1, rtime2, ts;
  int ret;

  if (timeout == 0) {
    pthread_cond_wait(cond, mutex);
    return 0;
  }

  /* Get a timestamp and add the timeout value. */
  get_monotonic_time(&rtime1);
#ifdef LWIP_UNIX_MACH
  ts.tv_sec = timeout / 1000L;
  ts.tv_nsec = (timeout % 1000L) * 1000000L;
  ret = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
  ts.tv_sec = rtime1.tv_sec + timeout / 1000L;
  ts.tv_nsec = rtime1.tv_nsec + (timeout % 1000L) * 1000000L;
  if (ts.tv_nsec >= 1000000000L) {
    ts.tv_sec++;
    ts.tv_nsec -= 1000000000L;
  }

  ret = pthread_cond_timedwait(cond, mutex, &ts);
#endif
  if (ret == ETIMEDOUT) {
    return SYS_ARCH_TIMEOUT;
  }

  /* Calculate for how long we waited for the cond. */
  get_monotonic_time(&rtime2);
  ts.tv_sec = rtime2.tv_sec - rtime1.tv_sec;
  ts.tv_nsec = rtime2.tv_nsec - rtime1.tv_nsec;
  if (ts.tv_nsec < 0) {
    ts.tv_sec--;
    ts.tv_nsec += 1000000000L;
  }
  return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}
Example #11
static void *
supWorkerThread (SupWorker worker) {
    struct timespec timeout = { 1, 0 };   // 1 second
    printf ("Work (%p): Run\n", worker);

    pthread_mutex_lock(&worker->lock);
    while (1) {
        switch (pthread_cond_timedwait_relative_np (&worker->cond, &worker->lock, &timeout)) {
            case ETIMEDOUT:
                if (0 == arc4random_uniform (10 * DEFAULT_WORKERS)) {
                    printf ("Work (%p): Fail\n", worker);
                    pthread_mutex_unlock(&worker->lock);
                    BRFail();
                }
                break;
            default:
                pthread_mutex_unlock(&worker->lock);
                pthread_exit(NULL);
       }
    }
}
Example #12
int
tvh_cond_timedwait
  ( tvh_cond_t *cond, pthread_mutex_t *mutex, int64_t monoclock )
{
#if defined(PLATFORM_DARWIN)
  /* Use a relative timedwait implementation */
  int64_t now = getmonoclock();
  int64_t relative = monoclock - now;

  struct timespec ts;
  ts.tv_sec  = relative / MONOCLOCK_RESOLUTION;
  ts.tv_nsec = (relative % MONOCLOCK_RESOLUTION) *
               (1000000000ULL/MONOCLOCK_RESOLUTION);

  return pthread_cond_timedwait_relative_np(&cond->cond, mutex, &ts);
#else
  struct timespec ts;
  ts.tv_sec = monoclock / MONOCLOCK_RESOLUTION;
  ts.tv_nsec = (monoclock % MONOCLOCK_RESOLUTION) *
               (1000000000ULL/MONOCLOCK_RESOLUTION);
  return pthread_cond_timedwait(&cond->cond, mutex, &ts);
#endif
}
Example #13
CL_BOOL
CL_ThreadUnix::Wait(DWORD msec)
{
    INT ret = 0;

    pthread_mutex_lock(&m_mutex);

    if (m_bSignalFlg == CL_FALSE)
    {
        if (INFINITE != msec)
        {
#ifdef _FOR_APPLE_
            struct timespec nptime;
            nptime.tv_sec = msec / 1000;
            nptime.tv_nsec = (msec % 1000) * 1000000;

            ret = pthread_cond_timedwait_relative_np(&m_cond, &m_mutex, &nptime);

#elif defined(_FOR_ANDROID_)
            struct timespec nptime;
            gettimespec(&nptime, msec);

            ret = pthread_cond_timedwait_monotonic_np(&m_cond, &m_mutex, &nptime);

#else // _LINUX
            struct timespec nptime;
            gettimespec(&nptime, msec);

            ret = pthread_cond_timedwait(&m_cond, &m_mutex, &nptime);
#endif
        }
        else
        {
            ret = pthread_cond_wait(&m_cond, &m_mutex);
        }
    }

    m_bSignalFlg = CL_FALSE;

    pthread_mutex_unlock(&m_mutex);

    // DWORD *pAddr = NULL; // [0] timer id; [1] timer pointer.
    while (1)
    {
        m_cSyncMSg.SyncStart();
        intptr_t* pAddr = (intptr_t*)m_cMsgQue.Pop();
        m_cSyncMSg.SyncEnd();
        if (pAddr != NULL)
        {
            if (CL_Timer::IsValid(*pAddr) == TRUE)
            {
                reinterpret_cast<CL_Timer*>(*(pAddr+1))->DoAction();
            }
            delete[] pAddr;
        }
        else
        {
            break;
        }
    }

    // ETIMEDOUT
    if (0 == ret)
    {
        return CL_TRUE;
    }
    else
    {
        return CL_FALSE;
    }
}
Example #14
// get the next frame, when available. return 0 if underrun/stream reset.
static abuf_t *buffer_get_frame(void) {
  int16_t buf_fill;
  uint64_t local_time_now;
  // struct timespec tn;
  abuf_t *abuf = 0;
  int i;
  abuf_t *curframe;

  pthread_mutex_lock(&ab_mutex);
  int wait;
  int32_t dac_delay = 0;
  do {
    // get the time
    local_time_now = get_absolute_time_in_fp();

    // if config.timeout (default 120) seconds have elapsed since the last audio packet was
    // received, then we should stop.
    // config.timeout of zero means don't check..., but iTunes may be confused by a long gap
    // followed by a resumption...

    if ((time_of_last_audio_packet != 0) && (shutdown_requested == 0) &&
        (config.dont_check_timeout == 0)) {
      uint64_t ct = config.timeout; // go from int to 64-bit int
      if ((local_time_now > time_of_last_audio_packet) &&
          (local_time_now - time_of_last_audio_packet >= ct << 32)) {
        debug(1, "As Yeats almost said, \"Too long a silence / can make a stone of the heart\"");
        rtsp_request_shutdown_stream();
        shutdown_requested = 1;
      }
    }
    int rco = get_requested_connection_state_to_output();

    if (connection_state_to_output != rco) {
      connection_state_to_output = rco;
      // change happening
      if (connection_state_to_output == 0) { // going off
        pthread_mutex_lock(&flush_mutex);
        flush_requested = 1;
        pthread_mutex_unlock(&flush_mutex);
      }
    }

    pthread_mutex_lock(&flush_mutex);
    if (flush_requested == 1) {
      if (config.output->flush)
        config.output->flush();
      ab_resync();
      first_packet_timestamp = 0;
      first_packet_time_to_play = 0;
      time_since_play_started = 0;
      flush_requested = 0;
    }
    pthread_mutex_unlock(&flush_mutex);
    uint32_t flush_limit = 0;
    if (ab_synced) {
      do {
        curframe = audio_buffer + BUFIDX(ab_read);
        if (curframe->ready) {

          if (curframe->sequence_number != ab_read) {
            // some kind of sync problem has occurred.
            if (BUFIDX(curframe->sequence_number) == BUFIDX(ab_read)) {
              // it looks like some kind of aliasing has happened
              if (seq_order(ab_read, curframe->sequence_number)) {
                ab_read = curframe->sequence_number;
                debug(1, "Aliasing of buffer index -- reset.");
              }
            } else {
              debug(1, "Inconsistent sequence numbers detected");
            }
          }

          if ((flush_rtp_timestamp != 0) &&
              ((curframe->timestamp == flush_rtp_timestamp) ||
               seq32_order(curframe->timestamp, flush_rtp_timestamp))) {
            debug(1, "Dropping flushed packet seqno %u, timestamp %u", curframe->sequence_number,
                  curframe->timestamp);
            curframe->ready = 0;
            flush_limit++;
            ab_read = SUCCESSOR(ab_read);
          }
          if ((flush_rtp_timestamp != 0) &&
              (!seq32_order(curframe->timestamp,
                            flush_rtp_timestamp))) // if we have gone past the flush boundary time
            flush_rtp_timestamp = 0;
        }
      } while ((flush_rtp_timestamp != 0) && (flush_limit <= 8820) && (curframe->ready == 0));

      if (flush_limit == 8820) {
        debug(1, "Flush hit the 8820 frame limit!");
        flush_limit = 0;
      }

      curframe = audio_buffer + BUFIDX(ab_read);

      if (curframe->ready) {
        if (ab_buffering) { // if we are getting packets but not yet forwarding them to the player
          if (first_packet_timestamp == 0) { // if this is the very first packet
            // debug(1,"First frame seen, time %u, with %d
            // frames...",curframe->timestamp,seq_diff(ab_read, ab_write));
            uint32_t reference_timestamp;
            uint64_t reference_timestamp_time,remote_reference_timestamp_time;
            get_reference_timestamp_stuff(&reference_timestamp, &reference_timestamp_time, &remote_reference_timestamp_time);
            if (reference_timestamp) { // if we have a reference time
              // debug(1,"First frame seen with timestamp...");
              first_packet_timestamp = curframe->timestamp; // we will keep buffering until we are
                                                            // supposed to start playing this

              // Here, calculate when we should start playing. We need to know when to allow the
              // packets to be sent to the player.
              // We will send packets of silence from now until that time and then we will send the
              // first packet, which will be followed by the subsequent packets.

              // we will get a fix every second or so, which will be stored as a pair consisting of
              // the time when the packet with a particular timestamp should be played, neglecting
              // latencies, etc.

              // It probably won't  be the timestamp of our first packet, however, so we might have
              // to do some calculations.

              // To calculate when the first packet will be played, we figure out the exact time the
              // packet should be played according to its timestamp and the reference time.
              // We then need to add the desired latency, typically 88200 frames.

              // Then we need to offset this by the backend latency offset. For example, if we knew
              // that the audio back end has a latency of 100 ms, we would
              // ask for the first packet to be emitted 100 ms earlier than it should, i.e. -4410
              // frames, so that when it got through the audio back end,
              // if would be in sync. To do this, we would give it a latency offset of -100 ms, i.e.
              // -4410 frames.

              int64_t delta = ((int64_t)first_packet_timestamp - (int64_t)reference_timestamp);

              first_packet_time_to_play =
                  reference_timestamp_time +
                  ((delta + (int64_t)config.latency + (int64_t)config.audio_backend_latency_offset)
                   << 32) /
                      44100;

              if (local_time_now >= first_packet_time_to_play) {
                debug(
                    1,
                    "First packet is late! It should have played before now. Flushing 0.1 seconds");
                player_flush(first_packet_timestamp + 4410);
              }
            }
          }

          if (first_packet_time_to_play != 0) {

            uint32_t filler_size = frame_size;
            uint32_t max_dac_delay = 4410;
            filler_size = 4410; // 0.1 second -- the maximum we'll add to the DAC

            if (local_time_now >= first_packet_time_to_play) {
              // we've gone past the time...
              // debug(1,"Run past the exact start time by %llu frames, with time now of %llx, fpttp
              // of %llx and dac_delay of %d and %d packets;
              // flush.",(((tn-first_packet_time_to_play)*44100)>>32)+dac_delay,tn,first_packet_time_to_play,dac_delay,seq_diff(ab_read,
              // ab_write));

              if (config.output->flush)
                config.output->flush();
              ab_resync();
              first_packet_timestamp = 0;
              first_packet_time_to_play = 0;
              time_since_play_started = 0;
            } else {
              if (config.output->delay) {
                dac_delay = config.output->delay();
                if (dac_delay == -1) {
                  debug(1, "Error getting dac_delay in buffer_get_frame.");
                  dac_delay = 0;
                }
              } else
                dac_delay = 0;
              uint64_t gross_frame_gap =
                  ((first_packet_time_to_play - local_time_now) * 44100) >> 32;
              int64_t exact_frame_gap = gross_frame_gap - dac_delay;
              if (exact_frame_gap <= 0) {
                // we've gone past the time...
                // debug(1,"Run a bit past the exact start time by %lld frames, with time now of
                // %llx, fpttp of %llx and dac_delay of %d and %d packets;
                // flush.",-exact_frame_gap,tn,first_packet_time_to_play,dac_delay,seq_diff(ab_read,
                // ab_write));
                if (config.output->flush)
                  config.output->flush();
                ab_resync();
                first_packet_timestamp = 0;
                first_packet_time_to_play = 0;
              } else {
                uint32_t fs = filler_size;
                if (fs > (max_dac_delay - dac_delay))
                  fs = max_dac_delay - dac_delay;
                if ((exact_frame_gap <= fs) || (exact_frame_gap <= frame_size * 2)) {
                  fs = exact_frame_gap;
                  // debug(1,"Exact frame gap is %llu; play %d frames of silence. Dac_delay is %d,
                  // with %d packets, ab_read is %04x, ab_write is
                  // %04x.",exact_frame_gap,fs,dac_delay,seq_diff(ab_read,
                  // ab_write),ab_read,ab_write);
                  ab_buffering = 0;
                }
                signed short *silence;
                silence = malloc(FRAME_BYTES(fs));
                memset(silence, 0, FRAME_BYTES(fs));
                // debug(1,"Exact frame gap is %llu; play %d frames of silence. Dac_delay is %d,
                // with %d packets.",exact_frame_gap,fs,dac_delay,seq_diff(ab_read, ab_write));
                config.output->play(silence, fs);
                free(silence);
                if (ab_buffering == 0) {
                  uint64_t reference_timestamp_time; // don't need this...
                  get_reference_timestamp_stuff(&play_segment_reference_frame, &reference_timestamp_time, &play_segment_reference_frame_remote_time);
#ifdef CONFIG_METADATA
                  send_ssnc_metadata('prsm', NULL, 0, 0); // "resume", but don't wait if the queue is locked
#endif
                }
              }
            }
          }
        }
      }
    }

    // Here, we work out whether to release a packet or wait
    // We release a buffer when the time is right.

    // To work out when the time is right, we need to take account of (1) the actual time the packet
    // should be released,
    // (2) the latency requested, (3) the audio backend latency offset and (4) the desired length of
    // the audio backend's buffer

    // The time is right if the current time is later or the same as
    // The packet time + (latency + latency offset - backend_buffer_length).
    // Note: the last three items are expressed in frames and must be converted to time.

    int do_wait = 1;
    if ((ab_synced) && (curframe) && (curframe->ready) && (curframe->timestamp)) {
      uint32_t reference_timestamp;
      uint64_t reference_timestamp_time,remote_reference_timestamp_time;
      get_reference_timestamp_stuff(&reference_timestamp, &reference_timestamp_time, &remote_reference_timestamp_time);
      if (reference_timestamp) { // if we have a reference time
        uint32_t packet_timestamp = curframe->timestamp;
        int64_t delta = ((int64_t)packet_timestamp - (int64_t)reference_timestamp);
        int64_t offset = (int64_t)config.latency + config.audio_backend_latency_offset -
                         (int64_t)config.audio_backend_buffer_desired_length;
        int64_t net_offset = delta + offset;
        int64_t time_to_play = reference_timestamp_time;
        int64_t net_offset_fp_sec;
        if (net_offset >= 0) {
          net_offset_fp_sec = (net_offset << 32) / 44100;
          time_to_play += net_offset_fp_sec; // using the latency requested...
          // debug(2,"Net Offset: %lld, adjusted: %lld.",net_offset,net_offset_fp_sec);
        } else {
          net_offset_fp_sec = ((-net_offset) << 32) / 44100;
          time_to_play -= net_offset_fp_sec;
          // debug(2,"Net Offset: %lld, adjusted: -%lld.",net_offset,net_offset_fp_sec);
        }

        if (local_time_now >= time_to_play) {
          do_wait = 0;
        }
      }
    }
    wait = (ab_buffering || (do_wait != 0) || (!ab_synced)) && (!please_stop);

    if (wait) {
      uint64_t time_to_wait_for_wakeup_fp =
          ((uint64_t)1 << 32) / 44100;       // this is time period of one frame
      time_to_wait_for_wakeup_fp *= 4 * 352; // four full 352-frame packets
      time_to_wait_for_wakeup_fp /= 3; // four thirds of a packet time

#ifdef COMPILE_FOR_LINUX_AND_FREEBSD
      uint64_t time_of_wakeup_fp = local_time_now + time_to_wait_for_wakeup_fp;
      uint64_t sec = time_of_wakeup_fp >> 32;
      uint64_t nsec = ((time_of_wakeup_fp & 0xffffffff) * 1000000000) >> 32;

      struct timespec time_of_wakeup;
      time_of_wakeup.tv_sec = sec;
      time_of_wakeup.tv_nsec = nsec;

      pthread_cond_timedwait(&flowcontrol, &ab_mutex, &time_of_wakeup);
// int rc = pthread_cond_timedwait(&flowcontrol,&ab_mutex,&time_of_wakeup);
// if (rc!=0)
//  debug(1,"pthread_cond_timedwait returned error code %d.",rc);
#endif
#ifdef COMPILE_FOR_OSX
      uint64_t sec = time_to_wait_for_wakeup_fp >> 32;
      uint64_t nsec = ((time_to_wait_for_wakeup_fp & 0xffffffff) * 1000000000) >> 32;
      struct timespec time_to_wait;
      time_to_wait.tv_sec = sec;
      time_to_wait.tv_nsec = nsec;
      pthread_cond_timedwait_relative_np(&flowcontrol, &ab_mutex, &time_to_wait);
#endif
    }
  } while (wait);
Example #15
/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled)
{
	struct vcpu *vcpu;
	const char *wmesg;
	int vcpu_halted, vm_halted;
	const struct timespec ts = {.tv_sec = 1, .tv_nsec = 0}; /* 1 second */

	KASSERT(!CPU_ISSET(((unsigned) vcpuid), &vm->halted_cpus),
		("vcpu already halted"));

	vcpu = &vm->vcpu[vcpuid];
	vcpu_halted = 0;
	vm_halted = 0;

	vcpu_lock(vcpu);
	while (1) {
		/*
		 * Do a final check for pending NMI or interrupts before
		 * really putting this thread to sleep. Also check for
		 * software events that would cause this vcpu to wakeup.
		 *
		 * These interrupts/events could have happened after the
		 * vcpu returned from VMRUN() and before it acquired the
		 * vcpu lock above.
		 */
		if (vm->rendezvous_func != NULL || vm->suspend)
			break;
		if (vm_nmi_pending(vm, vcpuid))
			break;
		if (!intr_disabled) {
			if (vm_extint_pending(vm, vcpuid) ||
			    vlapic_pending_intr(vcpu->vlapic, NULL)) {
				break;
			}
		}

		/*
		 * Some Linux guests implement "halt" by having all vcpus
		 * execute HLT with interrupts disabled. 'halted_cpus' keeps
		 * track of the vcpus that have entered this state. When all
		 * vcpus enter the halted state the virtual machine is halted.
		 */
		if (intr_disabled) {
			wmesg = "vmhalt";
			VCPU_CTR0(vm, vcpuid, "Halted");
			if (!vcpu_halted && halt_detection_enabled) {
				vcpu_halted = 1;
				CPU_SET_ATOMIC(((unsigned) vcpuid), &vm->halted_cpus);
			}
			if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
				vm_halted = 1;
				break;
			}
		} else {
			wmesg = "vmidle";
		}

		//t = ticks;
		vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
		/*
		 * XXX msleep_spin() cannot be interrupted by signals so
		 * wake up periodically to check pending signals.
		 */
		pthread_mutex_lock(&vcpu->vcpu_sleep_mtx);
		vcpu_unlock(vcpu);
		pthread_cond_timedwait_relative_np(&vcpu->vcpu_sleep_cnd,
			&vcpu->vcpu_sleep_mtx, &ts);
		vcpu_lock(vcpu);
		pthread_mutex_unlock(&vcpu->vcpu_sleep_mtx);
		//msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
		vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		//vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
	}

	if (vcpu_halted)
		CPU_CLR_ATOMIC(((unsigned) vcpuid), &vm->halted_cpus);

	vcpu_unlock(vcpu);

	if (vm_halted)
		vm_suspend(vm, VM_SUSPEND_HALT);

	return (0);
}

static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
{
	struct vie *vie;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	uint64_t gla, gpa, cs_base;
	struct vm_guest_paging *paging;
	mem_region_read_t mread;
	mem_region_write_t mwrite;
	enum vm_cpu_mode cpu_mode;
	int cs_d, error, fault, length;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	gla = vme->u.inst_emul.gla;
	gpa = vme->u.inst_emul.gpa;
	cs_base = vme->u.inst_emul.cs_base;
	cs_d = vme->u.inst_emul.cs_d;
	vie = &vme->u.inst_emul.vie;
	paging = &vme->u.inst_emul.paging;
	cpu_mode = paging->cpu_mode;

	VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#llx", gpa);

	/* Fetch, decode and emulate the faulting instruction */
	if (vie->num_valid == 0) {
		/*
		 * If the instruction length is not known then assume a
		 * maximum size instruction.
		 */
		length = vme->inst_length ? vme->inst_length : VIE_INST_SIZE;
		error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip +
		    cs_base, length, vie, &fault);
	} else {
		/*
		 * The instruction bytes have already been copied into 'vie'
		 */
		error = fault = 0;
	}
	if (error || fault)
		return (error);

	if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0) {
		VCPU_CTR1(vm, vcpuid, "Error decoding instruction at %#llx",
		    vme->rip + cs_base);
		*retu = true;	    /* dump instruction bytes in userspace */
		return (0);
	}

	/*
	 * If the instruction length was not specified then update it now
	 * along with 'nextrip'.
	 */
	if (vme->inst_length == 0) {
		vme->inst_length = vie->num_processed;
		vcpu->nextrip += vie->num_processed;
	}
 
	/* return to userland unless this is an in-kernel emulated device */
	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + XHYVE_PAGE_SIZE) {
		mread = lapic_mmio_read;
		mwrite = lapic_mmio_write;
	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
		mread = vioapic_mmio_read;
		mwrite = vioapic_mmio_write;
	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
		mread = vhpet_mmio_read;
		mwrite = vhpet_mmio_write;
	} else {
		*retu = true;
		return (0);
	}

	error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
	    mread, mwrite, retu);

	return (error);
}

static int
vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
{
	int i, done;
	struct vcpu *vcpu;
	const struct timespec ts = {.tv_sec = 1, .tv_nsec = 0}; /* 1 second */

	done = 0;
	vcpu = &vm->vcpu[vcpuid];

	CPU_SET_ATOMIC(((unsigned) vcpuid), &vm->suspended_cpus);

	/*
	 * Wait until all 'active_cpus' have suspended themselves.
	 *
	 * Since a VM may be suspended at any time including when one or
	 * more vcpus are doing a rendezvous we need to call the rendezvous
	 * handler while we are waiting to prevent a deadlock.
	 */
	vcpu_lock(vcpu);
	while (1) {
		if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "All vcpus suspended");
			break;
		}

		if (vm->rendezvous_func == NULL) {
			VCPU_CTR0(vm, vcpuid, "Sleeping during suspend");
			vcpu_require_state_locked(vcpu, VCPU_SLEEPING);

			pthread_mutex_lock(&vcpu->vcpu_sleep_mtx);
			vcpu_unlock(vcpu);
			pthread_cond_timedwait_relative_np(&vcpu->vcpu_sleep_cnd,
				&vcpu->vcpu_sleep_mtx, &ts);
			vcpu_lock(vcpu);
			pthread_mutex_unlock(&vcpu->vcpu_sleep_mtx);
			//msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);

			vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		} else {
			VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend");
			vcpu_unlock(vcpu);
			vm_handle_rendezvous(vm, vcpuid);
			vcpu_lock(vcpu);
		}
	}
	vcpu_unlock(vcpu);

	/*
	 * Wakeup the other sleeping vcpus and return to userspace.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(((unsigned) i), &vm->suspended_cpus)) {
			vcpu_notify_event(vm, i, false);
		}
	}

	*retu = true;
	return (0);
}

int
vm_suspend(struct vm *vm, enum vm_suspend_how how)
{
	int i;

	if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
		return (EINVAL);

	if (atomic_cmpset_int(((volatile u_int *) &vm->suspend), 0, how) == 0) {
		VM_CTR2(vm, "virtual machine already suspended %d/%d",
		    vm->suspend, how);
		return (EALREADY);
	}

	VM_CTR1(vm, "virtual machine successfully suspended %d", how);

	/*
	 * Notify all active vcpus that they are now suspended.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(((unsigned) i), &vm->active_cpus))
			vcpu_notify_event(vm, i, false);
	}

	return (0);
}
Example #16
static int
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
    bool from_idle)
{
	int error;
	const struct timespec ts = {.tv_sec = 1, .tv_nsec = 0}; /* 1 second */

	/*
	 * State transitions from the vmmdev_ioctl() must always begin from
	 * the VCPU_IDLE state. This guarantees that there is only a single
	 * ioctl() operating on a vcpu at any point.
	 */
	if (from_idle) {
		while (vcpu->state != VCPU_IDLE) {
			pthread_mutex_lock(&vcpu->state_sleep_mtx);
			vcpu_unlock(vcpu);
			pthread_cond_timedwait_relative_np(&vcpu->state_sleep_cnd,
				&vcpu->state_sleep_mtx, &ts);
			vcpu_lock(vcpu);
			pthread_mutex_unlock(&vcpu->state_sleep_mtx);
			//msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
		}
	} else {
		KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
		    "vcpu idle state"));
	}

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	}

	if (error)
		return (EBUSY);

	vcpu->state = newstate;

	if (newstate == VCPU_IDLE)
		pthread_cond_broadcast(&vcpu->state_sleep_cnd);
		//wakeup(&vcpu->state);

	return (0);
}

static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
		xhyve_abort("Error %d setting state to %d\n", error, newstate);
}
Example #17
    uint32_t Wait(uint32_t milliseconds, bool alertable)
    {
        UNREFERENCED_PARAMETER(alertable);

        timespec endTime;
#if HAVE_MACH_ABSOLUTE_TIME
        uint64_t endMachTime;
        if (milliseconds != INFINITE)
        {
            uint64_t nanoseconds = (uint64_t)milliseconds * tccMilliSecondsToNanoSeconds;
            NanosecondsToTimeSpec(nanoseconds, &endTime);
            endMachTime = mach_absolute_time() + nanoseconds * g_TimebaseInfo.denom / g_TimebaseInfo.numer;
        }
#elif HAVE_PTHREAD_CONDATTR_SETCLOCK
        if (milliseconds != INFINITE)
        {
            clock_gettime(CLOCK_MONOTONIC, &endTime);
            TimeSpecAdd(&endTime, milliseconds);
        }
#else
#error Don't know how to perform timed wait on this platform
#endif

        int st = 0;

        pthread_mutex_lock(&m_mutex);
        while (!m_state)
        {
            if (milliseconds == INFINITE)
            {
                st = pthread_cond_wait(&m_condition, &m_mutex);
            }
            else
            {
#if HAVE_MACH_ABSOLUTE_TIME
                // Since OSX doesn't support CLOCK_MONOTONIC, we use the relative variant of the
                // timed wait and we need to handle spurious wakeups properly.
                st = pthread_cond_timedwait_relative_np(&m_condition, &m_mutex, &endTime);
                if ((st == 0) && !m_state)
                {
                    uint64_t machTime = mach_absolute_time();
                    if (machTime < endMachTime)
                    {
                        // The wake up was spurious, recalculate the relative endTime
                        uint64_t remainingNanoseconds = (endMachTime - machTime) * g_TimebaseInfo.numer / g_TimebaseInfo.denom;
                        NanosecondsToTimeSpec(remainingNanoseconds, &endTime);
                    }
                    else
                    {
                        // Although the timed wait didn't report a timeout, time calculated from the
                        // mach time shows we have already reached the end time. It can happen if
                        // the wait was spuriously woken up right before the timeout.
                        st = ETIMEDOUT;
                    }
                }
#else // HAVE_MACH_ABSOLUTE_TIME
                st = pthread_cond_timedwait(&m_condition, &m_mutex, &endTime);
#endif // HAVE_MACH_ABSOLUTE_TIME
                // Verify that if the wait timed out, the event was not set
                assert((st != ETIMEDOUT) || !m_state);
            }

            if (st != 0)
            {
                // wait failed or timed out
                break;
            }
        }

        if ((st == 0) && !m_manualReset)
        {
            // Clear the state for auto-reset events so that only one waiter gets released
            m_state = false;
        }

        pthread_mutex_unlock(&m_mutex);

        uint32_t waitStatus;

        if (st == 0)
        {
            waitStatus = WAIT_OBJECT_0;
        }
        else if (st == ETIMEDOUT)
        {
            waitStatus = WAIT_TIMEOUT;
        }
        else
        {
            waitStatus = WAIT_FAILED;
        }

        return waitStatus;
    }
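The HAVE_PTHREAD_CONDATTR_SETCLOCK branch above computes an absolute end time with a TimeSpecAdd helper whose definition is not shown in this snippet. A plausible sketch of that helper (an assumption, not the runtime's actual code) is:

// Hypothetical sketch of the helper used above: add a millisecond delay to an
// absolute timespec, keeping tv_nsec normalized to [0, 1e9).
static void TimeSpecAdd(timespec* time, uint32_t milliseconds)
{
    time->tv_sec += milliseconds / 1000;
    time->tv_nsec += (long)(milliseconds % 1000) * 1000000;
    if (time->tv_nsec >= 1000000000)
    {
        time->tv_sec++;
        time->tv_nsec -= 1000000000;
    }
}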
Example #18
extern "C" int pthread_cond_timeout_np(pthread_cond_t* cond_interface,
                                       pthread_mutex_t* mutex, unsigned ms) {
  timespec ts;
  timespec_from_ms(ts, ms);
  return pthread_cond_timedwait_relative_np(cond_interface, mutex, &ts);
}
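The wrapper above leans on a timespec_from_ms helper that is not shown here. A plausible sketch of the conversion (an assumption, not bionic's actual definition) is:

// Hypothetical sketch: split a millisecond count into whole seconds and the
// nanosecond remainder for a relative timespec.
static void timespec_from_ms(timespec& result, unsigned ms) {
  result.tv_sec = ms / 1000;
  result.tv_nsec = (ms % 1000) * 1000000;
}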
Example #19
bool	CAGuard::WaitFor(UInt64 inNanos)
{
	bool theAnswer = false;

#if TARGET_OS_MAC
	ThrowIf(!pthread_equal(pthread_self(), mOwner), CAException(1), "CAGuard::WaitFor: A thread has to have locked a guard before it can wait");

	#if	Log_TimedWaits
		DebugMessageN1("CAGuard::WaitFor: waiting %.0f", (Float64)inNanos);
	#endif

	struct timespec	theTimeSpec;
	static const UInt64	kNanosPerSecond = 1000000000ULL;
	if(inNanos >= kNanosPerSecond)
	{
		theTimeSpec.tv_sec = static_cast<UInt32>(inNanos / kNanosPerSecond);
		theTimeSpec.tv_nsec = static_cast<UInt32>(inNanos % kNanosPerSecond);
	}
	else
	{
		theTimeSpec.tv_sec = 0;
		theTimeSpec.tv_nsec = static_cast<UInt32>(inNanos);
	}
	
	#if	Log_TimedWaits || Log_Latency || Log_Average_Latency
		UInt64	theStartNanos = CAHostTimeBase::GetCurrentTimeInNanos();
	#endif

	mOwner = 0;

	#if	Log_WaitOwnership
		DebugPrintfRtn(DebugPrintfFileComma "%p %.4f: CAGuard::WaitFor: thread %p is waiting on %s, owner: %p\n", pthread_self(), ((Float64)(CAHostTimeBase::GetCurrentTimeInNanos()) / 1000000.0), pthread_self(), mName, mOwner);
	#endif

	OSStatus theError = pthread_cond_timedwait_relative_np(&mCondVar, &mMutex, &theTimeSpec);
	ThrowIf((theError != 0) && (theError != ETIMEDOUT), CAException(theError), "CAGuard::WaitFor: Wait got an error");
	mOwner = pthread_self();
	
	#if	Log_TimedWaits || Log_Latency || Log_Average_Latency
		UInt64	theEndNanos = CAHostTimeBase::GetCurrentTimeInNanos();
	#endif
	
	#if	Log_TimedWaits
		DebugMessageN1("CAGuard::WaitFor: waited  %.0f", (Float64)(theEndNanos - theStartNanos));
	#endif
	
	#if	Log_Latency
		DebugMessageN1("CAGuard::WaitFor: latency  %.0f", (Float64)((theEndNanos - theStartNanos) - inNanos));
	#endif
	
	#if	Log_Average_Latency
		++mAverageLatencyCount;
		mAverageLatencyAccumulator += (theEndNanos - theStartNanos) - inNanos;
		if(mAverageLatencyCount >= 50)
		{
			DebugMessageN2("CAGuard::WaitFor: average latency  %.3f ns over %ld waits", mAverageLatencyAccumulator / mAverageLatencyCount, mAverageLatencyCount);
			mAverageLatencyCount = 0;
			mAverageLatencyAccumulator = 0.0;
		}
	#endif

	#if	Log_WaitOwnership
		DebugPrintfRtn(DebugPrintfFileComma "%p %.4f: CAGuard::WaitFor: thread %p waited on %s, owner: %p\n", pthread_self(), ((Float64)(CAHostTimeBase::GetCurrentTimeInNanos()) / 1000000.0), pthread_self(), mName, mOwner);
	#endif

	theAnswer = theError == ETIMEDOUT;
#elif TARGET_OS_WIN32
	ThrowIf(GetCurrentThreadId() != mOwner, CAException(1), "CAGuard::WaitFor: A thread has to have locked a guard before it can wait");

	#if	Log_TimedWaits
		DebugMessageN1("CAGuard::WaitFor: waiting %.0f", (Float64)inNanos);
	#endif

	//	the time out is specified in milliseconds(!)
	UInt32 theWaitTime = static_cast<UInt32>(inNanos / 1000000ULL);

	#if	Log_TimedWaits || Log_Latency || Log_Average_Latency
		UInt64	theStartNanos = CAHostTimeBase::GetCurrentTimeInNanos();
	#endif

	mOwner = 0;

	#if	Log_WaitOwnership
		DebugPrintfRtn(DebugPrintfFileComma "%lu %.4f: CAGuard::WaitFor: thread %lu is waiting on %s, owner: %lu\n", GetCurrentThreadId(), ((Float64)(CAHostTimeBase::GetCurrentTimeInNanos()) / 1000000.0), GetCurrentThreadId(), mName, mOwner);
	#endif
	
	ReleaseMutex(mMutex);
	HANDLE theHandles[] = { mMutex, mEvent };
	OSStatus theError = WaitForMultipleObjects(2, theHandles, true, theWaitTime);
	ThrowIf((theError != WAIT_OBJECT_0) && (theError != WAIT_TIMEOUT), CAException(GetLastError()), "CAGuard::WaitFor: Wait got an error");
	mOwner = GetCurrentThreadId();
	ResetEvent(mEvent);
	
	#if	Log_TimedWaits || Log_Latency || Log_Average_Latency
		UInt64	theEndNanos = CAHostTimeBase::GetCurrentTimeInNanos();
	#endif
	
	#if	Log_TimedWaits
		DebugMessageN1("CAGuard::WaitFor: waited  %.0f", (Float64)(theEndNanos - theStartNanos));
	#endif
	
	#if	Log_Latency
		DebugMessageN1("CAGuard::WaitFor: latency  %.0f", (Float64)((theEndNanos - theStartNanos) - inNanos));
	#endif
	
	#if	Log_Average_Latency
		++mAverageLatencyCount;
		mAverageLatencyAccumulator += (theEndNanos - theStartNanos) - inNanos;
		if(mAverageLatencyCount >= 50)
		{
			DebugMessageN2("CAGuard::WaitFor: average latency  %.3f ns over %ld waits", mAverageLatencyAccumulator / mAverageLatencyCount, mAverageLatencyCount);
			mAverageLatencyCount = 0;
			mAverageLatencyAccumulator = 0.0;
		}
	#endif

	#if	Log_WaitOwnership
		DebugPrintfRtn(DebugPrintfFileComma "%lu %.4f: CAGuard::WaitFor: thread %lu waited on %s, owner: %lu\n", GetCurrentThreadId(), ((Float64)(CAHostTimeBase::GetCurrentTimeInNanos()) / 1000000.0), GetCurrentThreadId(), mName, mOwner);
	#endif

	theAnswer = theError == WAIT_TIMEOUT;
#endif

	return theAnswer;
}