Example #1
void CAudioEncoder::ForwardEncodedAudioFrames(void)
{
  u_int8_t* pFrame;
  u_int32_t frameLength;
  u_int32_t frameNumSamples;

  while (GetEncodedFrame(&pFrame, 
			 &frameLength, 
			 &frameNumSamples)) {

    // sanity check
    if (pFrame == NULL || frameLength == 0) {
#ifdef DEBUG_SYNC
      debug_message("%s:No frame", Profile()->GetName());
#endif
      break;
    }

    //debug_message("Got encoded frame");

    // output has frame start timestamp
    Timestamp output = DstSamplesToTicks(m_audioDstSampleNumber);

    m_audioDstFrameNumber++;
    m_audioDstSampleNumber += frameNumSamples;
    m_audioDstElapsedDuration = DstSamplesToTicks(m_audioDstSampleNumber);

    //debug_message("m_audioDstSampleNumber = %llu", m_audioDstSampleNumber);

    // forward the encoded frame to sinks

#ifdef DEBUG_SYNC
    debug_message("%s:audio forwarding "U64, 
		  Profile()->GetName(), output);
#endif
    CMediaFrame* pMediaFrame =
      new CMediaFrame(
		      GetFrameType(),
		      pFrame, 
		      frameLength,
		      m_audioStartTimestamp + output,
		      frameNumSamples,
		      m_audioDstSampleRate);

    ForwardFrame(pMediaFrame);
  }
}
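A note on the timestamp arithmetic: output is derived from the running destination sample count via DstSamplesToTicks(). A minimal sketch of that conversion, assuming a fixed ticks-per-second clock (mpeg4ip defines its own TimestampTicks constant; the value below is illustrative only):

#include <cstdint>

typedef uint64_t Timestamp;

static const uint64_t TICKS_PER_SEC = 90000;  // assumption, not mpeg4ip's actual value

// convert an output sample count into clock ticks; multiply before
// dividing so sub-second sample counts don't truncate to zero
Timestamp DstSamplesToTicks(uint64_t samples, uint32_t dstSampleRate)
{
  return (samples * TICKS_PER_SEC) / dstSampleRate;
}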
Example #2
void CTextEncoder::SendFrame (Timestamp t)
{
  CMediaFrame *mf;
  void *buf;
  uint32_t buflen;

  if (GetEncodedFrame(&buf, &buflen) == false) {
    return;
  }

  //debug_message("encode %p", buf);
  mf = new CMediaFrame(m_textDstType,
		       buf, 
		       buflen,
		       t);
#ifdef DEBUG_TEXT
  debug_message("frame len %u", buflen);
#endif
  ForwardFrame(mf);
}
Example #3
void CMediaSource::ProcessAudioFrame(
				     u_int8_t* frameData,
				     u_int32_t frameDataLength,
				     Timestamp srcFrameTimestamp)
{
  if (m_audioSrcFrameNumber == 0) {
    if (!m_sourceVideo || m_videoSrcFrameNumber == 0) {
      m_encodingStartTimestamp = GetTimestamp();
    }
    m_audioStartTimestamp = srcFrameTimestamp;
#ifdef DEBUG_AUDIO_SYNC
    debug_message("m_audioStartTimestamp = "U64, m_audioStartTimestamp);
#endif
  }

  if (m_audioDstFrameNumber == 0) {
    // we wait until we see the first encoded frame.
    // this is because encoders usually buffer the first few
    // raw audio frames fed to them, and this number varies
    // from one encoder to another
    m_audioEncodingStartTimestamp = srcFrameTimestamp;
  }

  // we calculate audioSrcElapsedDuration by taking the current frame's
  // timestamp and subtracting the audioEncodingStartTimestamp (and NOT
  // the audioStartTimestamp).
  // this way, we just need to compare audioSrcElapsedDuration with 
  // audioDstElapsedDuration (which should match in the ideal case),
  // and we don't have to compensate for the lag introduced by the initial
  // buffering of source frames in the encoder, which may vary from
  // one encoder to another
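  // (Illustrative example: if the encoder buffers the first two raw
  // frames, the first encoded frame appears two frame-times "late", so
  // audioDstElapsedDuration would permanently trail a source clock
  // measured from audioStartTimestamp by that offset. Starting the
  // source clock at audioEncodingStartTimestamp cancels it.)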
  m_audioSrcElapsedDuration = srcFrameTimestamp - m_audioEncodingStartTimestamp;
  m_audioSrcFrameNumber++;

#if 0 
  // not needed
  if (resync) {
    // flush preEncodingBuffer
    m_audioPreEncodingBufferLength = 0;

    // change dst sample numbers to account for gap
    m_audioDstSampleNumber = m_audioDstRawSampleNumber
      = DstTicksToSamples(m_audioSrcElapsedDuration);
    error_message("Received resync");
  }
#endif

  bool pcmMalloced = false;
  bool pcmBuffered;
  u_int8_t* pcmData = frameData;
  u_int32_t pcmDataLength = frameDataLength;

  if (m_audioSrcChannels != m_audioDstChannels) {
    // Convert the channels if they don't match
    // we either double the channel info, or combine
    // the left and right
    uint32_t samples = SrcBytesToSamples(frameDataLength);
    uint32_t dstLength = DstSamplesToBytes(samples);
    pcmData = (u_int8_t *)Malloc(dstLength);
    pcmDataLength = dstLength;
    pcmMalloced = true;

    int16_t *src = (int16_t *)frameData;
    int16_t *dst = (int16_t *)pcmData;
    if (m_audioSrcChannels == 1) {
      // 1 channel to 2
      for (uint32_t ix = 0; ix < samples; ix++) {
	*dst++ = *src;
	*dst++ = *src++;
      }
    } else {
      // 2 channels to 1
      for (uint32_t ix = 0; ix < samples; ix++) {
	int32_t sum = *src++;
	sum += *src++;
	sum /= 2;
	if (sum < -32768) sum = -32768;
	else if (sum > 32767) sum = 32767;
	*dst++ = sum & 0xffff;
      }
    }
  }

  // resample audio, if necessary
  if (m_audioSrcSampleRate != m_audioDstSampleRate) {
    ResampleAudio(pcmData, pcmDataLength);

    // resampled data is now available in m_audioPreEncodingBuffer
    pcmBuffered = true;

  } else if (m_audioSrcSamplesPerFrame != m_audioDstSamplesPerFrame) {
    // reframe audio, if necessary
    // e.g. MP3 is 1152 samples/frame, AAC is 1024 samples/frame

    // add samples to end of m_audioBuffer
    // InitAudio() ensures that buffer is large enough
    memcpy(
	   &m_audioPreEncodingBuffer[m_audioPreEncodingBufferLength],
	   pcmData,
	   pcmDataLength);

    m_audioPreEncodingBufferLength += pcmDataLength;

    pcmBuffered = true;

  } else {
    pcmBuffered = false;
  }

  // LATER restructure so as to get rid of this label, and the goto below
 pcmBufferCheck:

  if (pcmBuffered) {
    u_int32_t samplesAvailable =
      DstBytesToSamples(m_audioPreEncodingBufferLength);

    // not enough samples collected yet to call encode or forward
    if (samplesAvailable < m_audioDstSamplesPerFrame) {
      return;
    }
    if (pcmMalloced) {
      free(pcmData);
      pcmMalloced = false;
    }

    // setup for encode/forward
    pcmData = &m_audioPreEncodingBuffer[0];
    pcmDataLength = DstSamplesToBytes(m_audioDstSamplesPerFrame);
  }


  // encode audio frame
  if (m_pConfig->m_audioEncode) {
    Duration frametime = DstSamplesToTicks(DstBytesToSamples(frameDataLength));

#ifdef DEBUG_AUDIO_SYNC
    debug_message("asrc# %d srcDuration="U64" dst# %d dstDuration "U64,
                  m_audioSrcFrameNumber, m_audioSrcElapsedDuration,
                  m_audioDstFrameNumber, m_audioDstElapsedDuration);
#endif

    // destination gets ahead of source
    // This has been observed as a result of clock frequency drift between
    // the sound card oscillator and the system mainboard oscillator.
    // Example: if the sound card oscillator has a 'real' frequency that
    // is slightly larger than the 'rated' frequency, and we are sampling
    // at 32 kHz, then the 32000 samples acquired from the sound card
    // 'actually' occupy a duration of slightly less than a second.
    // 
    // The clock drift is usually a fraction of a Hz and takes a long
    // time (~ 20-30 minutes) before we are off by one frame duration
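    //
    // Illustrative arithmetic: at a rated 32 kHz running 0.5 Hz fast,
    // each captured second is short by 0.5/32000 (~15.6 us), so drifting
    // by one 1024-sample frame duration (32 ms) takes roughly
    // 0.032 / 1.56e-5, i.e. about 2048 seconds of capture.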

    if (m_audioSrcElapsedDuration + frametime < m_audioDstElapsedDuration) {
      debug_message("audio: dropping frame, SrcElapsedDuration="U64" DstElapsedDuration="U64,
                    m_audioSrcElapsedDuration, m_audioDstElapsedDuration);
      return;
    }

    // source gets ahead of destination
    // We tolerate a difference of 3 frames since A/V sync is usually
    // noticeable after that. This way we give the encoder a chance to pick up
    if (m_audioSrcElapsedDuration > (3 * frametime) + m_audioDstElapsedDuration) {
      int j = (int) (DstTicksToSamples(m_audioSrcElapsedDuration
                                       + (2 * frametime)
                                       - m_audioDstElapsedDuration)
                     / m_audioDstSamplesPerFrame);
      debug_message("audio: Adding %d silence frames", j);
      for (int k=0; k<j; k++)
        AddSilenceFrame();
    }

    //Timestamp encodingStartTimestamp = GetTimestamp();

    bool rc = m_audioEncoder->EncodeSamples(
                                            (int16_t*)pcmData,
                                            m_audioDstSamplesPerFrame,
                                            m_audioDstChannels);

    if (!rc) {
      debug_message("failed to encode audio");
      return;
    }

    // Disabled since we are not taking into account audio drift anymore
    /*
    Duration encodingTime =  (GetTimestamp() - encodingStartTimestamp);
    if (m_sourceRealTime && m_videoSource) {
      Duration drift;
      if (frametime <= encodingTime) {
        drift = encodingTime - frametime;
        m_videoSource->AddEncodingDrift(drift);
      }
    }
    */

    ForwardEncodedAudioFrames();

  }

  // Forward PCM frames to the feeder sink
  if (m_pConfig->GetBoolValue(CONFIG_FEEDER_SINK_ENABLE) &&
      frameDataLength > 0) {
    // make a copy of the pcm data for the feeder sink
    u_int8_t* FwdedData = (u_int8_t*)Malloc(frameDataLength);
    memcpy(FwdedData, frameData, frameDataLength);

    CMediaFrame* pFrame =
      new CMediaFrame(
		      RAWPCMAUDIOFRAME,
		      FwdedData,
		      frameDataLength,
		      srcFrameTimestamp,
		      0,
		      m_audioDstSampleRate);

    ForwardFrame(pFrame);
  }
  
  // if desired, forward raw audio to sinks
  if (m_pConfig->SourceRawAudio() && pcmDataLength > 0) {

    // make a copy of the pcm data if needed
    u_int8_t* pcmForwardedData;

    if (!pcmMalloced) {
      pcmForwardedData = (u_int8_t*)Malloc(pcmDataLength);
      memcpy(pcmForwardedData, pcmData, pcmDataLength);
    } else {
      pcmForwardedData = pcmData;
      pcmMalloced = false;
    }
#ifndef WORDS_BIGENDIAN
    // swap byte ordering so we have big endian to write into
    // the file.
    uint16_t *pdata = (uint16_t *)pcmForwardedData;
    for (uint32_t ix = 0; 
	 ix < pcmDataLength; 
	 ix += sizeof(uint16_t),pdata++) {
      uint16_t swap = *pdata;
      *pdata = B2N_16(swap);
    }
#endif

    CMediaFrame* pFrame =
      new CMediaFrame(
		      PCMAUDIOFRAME, 
		      pcmForwardedData, 
		      pcmDataLength,
		      m_audioStartTimestamp 
		      + DstSamplesToTicks(m_audioDstRawSampleNumber),
		      DstBytesToSamples(pcmDataLength),
		      m_audioDstSampleRate);
    ForwardFrame(pFrame);

    // pcmData is in destination format at this point (cf. the sample
    // count passed to the frame above), so convert with DstBytesToSamples
    m_audioDstRawSampleNumber += DstBytesToSamples(pcmDataLength);
    m_audioDstRawFrameNumber++;
  }

  if (pcmMalloced) {
    free(pcmData);
  }

  if (pcmBuffered) {
    m_audioPreEncodingBufferLength -= pcmDataLength;
    // the source and destination regions can overlap when more than one
    // frame remains buffered, so use memmove rather than memcpy
    memmove(&m_audioPreEncodingBuffer[0],
            &m_audioPreEncodingBuffer[pcmDataLength],
            m_audioPreEncodingBufferLength);

    goto pcmBufferCheck;
  }

}
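Distilled from the branches above, the sync policy is: drop the incoming frame when the destination is more than one frame ahead of the source, and pad with silence when the source is more than three frames ahead. A standalone sketch of that decision, with hypothetical names (this is not the mpeg4ip API):

#include <cstdint>

typedef int64_t Duration;

enum SyncAction { SYNC_ENCODE, SYNC_DROP_FRAME, SYNC_PAD_SILENCE };

// srcElapsed/dstElapsed are elapsed durations in clock ticks;
// frameTime is the duration of one encoded frame
SyncAction AudioSyncAction(Duration srcElapsed, Duration dstElapsed,
                           Duration frameTime)
{
  // destination ahead of source: drop this source frame
  if (srcElapsed + frameTime < dstElapsed)
    return SYNC_DROP_FRAME;
  // source more than 3 frames ahead: insert silence before encoding
  if (srcElapsed > dstElapsed + 3 * frameTime)
    return SYNC_PAD_SILENCE;
  return SYNC_ENCODE;
}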
Example #4
void CALSAAudioSource::ProcessAudio(void)
{
  int err;

  if (m_audioSrcFrameNumber == 0) {
    // Start the device
    if ((err = snd_pcm_start(m_pcmHandle)) < 0) {
      error_message("Couldn't start the PCM device: %s", snd_strerror(err));
    }
  }

  snd_pcm_status_t *status;
  snd_pcm_status_alloca(&status);

  // for efficiency, process 1 second before returning to check for commands
  for (int pass = 0; pass < m_maxPasses && m_stop_thread == false; pass++) {

    u_int8_t* pcmFrameBuffer = (u_int8_t*)malloc(m_pcmFrameSize);

    // ALSA frames are not the same as the PCM frames used to feed the
    // encoder; calculate how many ALSA frames must be read to fill one
    // PCM frame
    snd_pcm_uframes_t num_frames = m_pcmFrameSize / (m_audioSrcChannels * sizeof(u_int16_t));

    // Check how many bytes are available to read in the buffer; this is
    // used to calculate the timestamp
    snd_pcm_status(m_pcmHandle, status);
    unsigned long avail_bytes = snd_pcm_status_get_avail(status) * (m_audioSrcChannels * sizeof(u_int16_t));
    Timestamp currentTime = GetTimestamp();
    Timestamp timestamp;

    // Read num_frames frames from the PCM device
    // pointed to by pcm_handle to buffer capdata.
    // Returns the number of frames actually read.
    // TODO On certain alsa configurations, e.g. when using dsnoop with low sample rate, the period gets too small. What to do about that?
    snd_pcm_sframes_t framesRead;
    if((framesRead = snd_pcm_readi(m_pcmHandle, pcmFrameBuffer, num_frames)) == -EPIPE) {
      snd_pcm_prepare(m_pcmHandle);
      // Buffer Overrun. This means the audio buffer is full, and not capturing
      // we want to make the timestamp based on the previous one
      // When we hit this case, we start using the m_timestampOverflowArray
      // This will give us a timestamp for when the array is full.
      // 
      // In other words, if we have a full audio buffer (ie: it's not loading
      // any more), we start storing the current timestamp into the array.
      // This will let us "catch up", and have a somewhat accurate timestamp
      // when we loop around
      // 
      // wmay - I'm not convinced that this actually works - if the buffer
      // cleans up, we'll ignore m_timestampOverflowArray
      if (m_timestampOverflowArray != NULL && m_timestampOverflowArray[m_timestampOverflowArrayIndex] != 0) {
        timestamp = m_timestampOverflowArray[m_timestampOverflowArrayIndex];
      } else {
        // convert available bytes to samples before converting to ticks
        timestamp = m_prevTimestamp + SrcSamplesToTicks(SrcBytesToSamples(avail_bytes));
      }

      if (m_timestampOverflowArray != NULL)
        m_timestampOverflowArray[m_timestampOverflowArrayIndex] = currentTime;

      debug_message("audio buffer full !");
    } else {
      if (framesRead < (snd_pcm_sframes_t) num_frames) {
        error_message("Bad audio read. Expected %lu frames, got %ld", num_frames, framesRead);
        free(pcmFrameBuffer);
        continue;
      }

      // buffer is not full - so, we make the timestamp based on the number
      // of bytes in the buffer that we read.
      timestamp = currentTime - SrcSamplesToTicks(SrcBytesToSamples(avail_bytes));
      if (m_timestampOverflowArray != NULL)
        m_timestampOverflowArray[m_timestampOverflowArrayIndex] = 0;
    }
    //debug_message("alsa read");
#ifdef DEBUG_TIMESTAMPS
    debug_message("avail_bytes=%lu t="U64" timestamp="U64" delta="U64,
                  avail_bytes, currentTime, timestamp, timestamp - m_prevTimestamp);
#endif

    m_prevTimestamp = timestamp;
    if (m_timestampOverflowArray != NULL) {
      m_timestampOverflowArrayIndex = (m_timestampOverflowArrayIndex + 1) % m_audioMaxBufferFrames;
    }
#ifdef DEBUG_TIMESTAMPS
    debug_message("pcm forward "U64" %u", timestamp, m_pcmFrameSize);
#endif
    if (m_audioSrcFrameNumber == 0 && m_videoSource != NULL) {
      m_videoSource->RequestKeyFrame(timestamp);
    }
    m_audioSrcFrameNumber++;
    CMediaFrame *frame = new CMediaFrame(PCMAUDIOFRAME,
					 pcmFrameBuffer, 
					 m_pcmFrameSize, 
					 timestamp);
    ForwardFrame(frame);
  }
}
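The timestamp logic above back-dates the wall clock by the duration of audio already queued in the capture buffer, so the stamp refers to when the oldest buffered sample was captured. A minimal sketch of that computation, assuming 16-bit interleaved capture (the constants and names are illustrative, not the mpeg4ip helpers):

#include <cstdint>

typedef uint64_t Timestamp;

static const uint64_t TICKS_PER_SEC = 90000;  // illustrative clock resolution
static const uint32_t SAMPLE_RATE   = 32000;  // illustrative capture rate
static const uint32_t CHANNELS      = 2;

Timestamp CaptureTimestamp(Timestamp now, unsigned long availBytes)
{
  // bytes -> per-channel sample frames for 16-bit interleaved audio
  uint64_t availSamples = availBytes / (CHANNELS * sizeof(uint16_t));
  // back-date "now" by the duration those buffered samples represent
  return now - (availSamples * TICKS_PER_SEC) / SAMPLE_RATE;
}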
Example #5
void CMediaSource::ProcessVideoYUVFrame(
					u_int8_t* pY,
					u_int8_t* pU,
					u_int8_t* pV,
					u_int16_t yStride,
					u_int16_t uvStride,
					Timestamp srcFrameTimestamp)
{
  if (m_videoSrcFrameNumber == 0) {
    if (m_audioSrcFrameNumber == 0) {
      m_encodingStartTimestamp = GetTimestamp();
    }
    m_videoStartTimestamp = srcFrameTimestamp;
  }

  m_videoSrcFrameNumber++;
  m_videoSrcElapsedDuration = srcFrameTimestamp - m_videoStartTimestamp;

#ifdef DEBUG_VIDEO_SYNC
  debug_message("vsrc# %d srcDuration="U64" dst# %d dstDuration "U64,
                m_videoSrcFrameNumber, m_videoSrcElapsedDuration,
                m_videoDstFrameNumber, m_videoDstElapsedDuration);
#endif

  // destination gets ahead of source
  // drop src frames as needed to match target frame rate
  if (m_videoSrcElapsedDuration + m_videoDstFrameDuration < m_videoDstElapsedDuration) {
#ifdef DEBUG_VIDEO_SYNC
    debug_message("video: dropping frame, SrcElapsedDuration="U64" DstElapsedDuration="U64,
                  m_videoSrcElapsedDuration, m_videoDstElapsedDuration);
#endif
    return;
  }

  Duration lag = m_videoSrcElapsedDuration - m_videoDstElapsedDuration;

  // source gets ahead of destination
  if (lag > 3 * m_videoDstFrameDuration) {
    debug_message("lag "D64" src "U64" dst "U64,
		  lag, m_videoSrcElapsedDuration, m_videoDstElapsedDuration);
    int j = (lag - (2 * m_videoDstFrameDuration)) / m_videoDstFrameDuration;
    m_videoDstFrameNumber += j;
    m_videoDstElapsedDuration = VideoDstFramesToDuration();
    debug_message("video: advancing dst by %d frames", j);
  }
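  // e.g. with lag = 3.5 frame durations, j = (int)(3.5 - 2) = 1: the
  // dst counter advances one frame, leaving the source ~2.5 frames
  // ahead, back inside the 3-frame tolerance rather than snapped to zero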

  // Disabled since we are not taking into account audio drift anymore
  // and the new algorithm automatically factors in any drift due
  // to video encoding
  /*
    // add any external drift (i.e. audio encoding drift)
    //to our drift measurement
    m_videoEncodingDrift += m_otherTotalDrift - m_otherLastTotalDrift;
    m_otherLastTotalDrift = m_otherTotalDrift;

    // check if the video encoding drift exceeds the max limit
    if (m_videoEncodingDrift >= m_videoEncodingMaxDrift) {
      // we skip multiple destination frames to give audio
      // a better chance to keep up
      // on subsequent calls, we will return immediately until
      // m_videoSrcElapsedDuration catches up with m_videoDstElapsedDuration
      int framesToSkip = m_videoEncodingDrift / m_videoDstFrameDuration;
      m_videoEncodingDrift -= framesToSkip * m_videoDstFrameDuration;
      m_videoDstFrameNumber += framesToSkip;
      m_videoDstElapsedDuration = VideoDstFramesToDuration();

      debug_message("video: will skip %d frames due to encoding drift", framesToSkip);

      return;
    }
  */

  m_videoEncodedFrames++;
  m_videoDstFrameNumber++;
  m_videoDstElapsedDuration = VideoDstFramesToDuration();

  //Timestamp encodingStartTimestamp = GetTimestamp();

  // this will either never happen (live capture)
  // or just happen once at startup when we discover
  // the stride used by the video decoder
  if (yStride != m_videoSrcYStride) {
    SetVideoSrcSize(m_videoSrcWidth, m_videoSrcHeight, 
		    yStride, m_videoMatchAspectRatios);
  }

  u_int8_t* mallocedYuvImage = NULL;

  // crop to desired aspect ratio (may be a no-op)
  u_int8_t* yImage = pY + m_videoSrcYCrop;
  u_int8_t* uImage = pU + m_videoSrcUVCrop;
  u_int8_t* vImage = pV + m_videoSrcUVCrop;

  // resize image if necessary
  if (m_videoYResizer) {
    u_int8_t* resizedYUV = (u_int8_t*)Malloc(m_videoDstYUVSize);
		
    u_int8_t* resizedY = resizedYUV;
    u_int8_t* resizedU = resizedYUV + m_videoDstYSize;
    u_int8_t* resizedV = resizedYUV + m_videoDstYSize + m_videoDstUVSize;

    m_videoSrcYImage->data = yImage;
    m_videoDstYImage->data = resizedY;
    scale_image_process(m_videoYResizer);

    m_videoSrcUVImage->data = uImage;
    m_videoDstUVImage->data = resizedU;
    scale_image_process(m_videoUVResizer);

    m_videoSrcUVImage->data = vImage;
    m_videoDstUVImage->data = resizedV;
    scale_image_process(m_videoUVResizer);

    // done with the original source image
    if (mallocedYuvImage) free(mallocedYuvImage);

    // switch over to resized version
    mallocedYuvImage = resizedYUV;
    yImage = resizedY;
    uImage = resizedU;
    vImage = resizedV;
    yStride = m_videoDstWidth;
    uvStride = yStride / 2;
  }

  if (m_videoFilterInterlace) {
    video_filter_interlace(yImage, yImage + m_videoDstYSize, yStride);
  }
  // if we want encoded video frames
  if (m_pConfig->m_videoEncode) {
    bool rc = m_videoEncoder->EncodeImage(
					  yImage, uImage, vImage, 
					  yStride, uvStride,
					  m_videoWantKeyFrame,
					  m_videoDstElapsedDuration,
					  srcFrameTimestamp);

    if (!rc) {
      debug_message("Can't encode image!");
      if (mallocedYuvImage) free(mallocedYuvImage);
      return;
    }

#ifdef DEBUG_VCODEC_SHADOW
    m_videoEncoderShadow->EncodeImage(
                                      yImage, uImage, vImage,
                                      yStride, uvStride,
                                      m_videoWantKeyFrame);
    // Note: we don't retrieve the encoded frame from the shadow encoder
#endif

    m_videoWantKeyFrame = false;
  }

  // forward encoded video to sinks
  if (m_pConfig->m_videoEncode) {
    uint8_t *frame;
    uint32_t frame_len;
    bool got_image;
    Timestamp pts, dts;
    got_image = m_videoEncoder->GetEncodedImage(&frame,
						&frame_len,
						&dts,
						&pts);
    if (got_image) {
      //error_message("frame len %d time %llu", frame_len, out);
      CMediaFrame* pFrame = new CMediaFrame(
					    m_videoEncoder->GetFrameType(),
					    frame,
					    frame_len,
					    dts,
					    m_videoDstFrameDuration,
					    TimestampTicks,
					    pts);
      pFrame->SetMediaFreeFunction(m_videoEncoder->GetMediaFreeFunction());
      ForwardFrame(pFrame);
    }
  }

  // forward raw video to sinks
  if (m_pConfig->SourceRawVideo() ||
      m_pConfig->GetBoolValue(CONFIG_FEEDER_SINK_ENABLE)) {

    m_videoDstPrevImage = (u_int8_t*)Malloc(m_videoDstYUVSize);

    imgcpy(m_videoDstPrevImage, 
	   yImage, 
	   m_videoDstWidth,
	   m_videoDstHeight,
	   yStride);
    imgcpy(m_videoDstPrevImage + m_videoDstYSize,
	   uImage, 
	   m_videoDstWidth / 2,
	   m_videoDstHeight / 2,
	   uvStride);
    imgcpy(m_videoDstPrevImage + m_videoDstYSize + m_videoDstUVSize,
	   vImage, 
	   m_videoDstWidth / 2,
	   m_videoDstHeight / 2,
	   uvStride);

    CMediaFrame* pFrame =
      new CMediaFrame(
                      YUVVIDEOFRAME, 
                      m_videoDstPrevImage, 
                      m_videoDstYUVSize,
                      srcFrameTimestamp, 
                      m_videoDstFrameDuration);
    ForwardFrame(pFrame);
  }

  // forward reconstructed video to sinks
  if (m_pConfig->m_videoEncode
      && m_pConfig->GetBoolValue(CONFIG_VIDEO_ENCODED_PREVIEW)) {

    m_videoDstPrevReconstructImage = (u_int8_t*)Malloc(m_videoDstYUVSize);

    m_videoEncoder->GetReconstructedImage(
					  m_videoDstPrevReconstructImage,
					  m_videoDstPrevReconstructImage
					  + m_videoDstYSize,
					  m_videoDstPrevReconstructImage
					  + m_videoDstYSize + m_videoDstUVSize);

    CMediaFrame* pFrame = new CMediaFrame(RECONSTRUCTYUVVIDEOFRAME,
                                          m_videoDstPrevReconstructImage,
                                          m_videoDstYUVSize,
                                          srcFrameTimestamp,
                                          m_videoDstFrameDuration);
    ForwardFrame(pFrame);
  }

  // Disabled since we are not taking into account audio drift anymore
  /*
  // update the video encoding drift
  if (m_sourceRealTime) {
    Duration drift = GetTimestamp() - encodingStartTimestamp;
    if (drift > m_videoDstFrameDuration) {
      m_videoEncodingDrift += drift - m_videoDstFrameDuration;
    } else {
      drift = m_videoDstFrameDuration - drift;
      if (m_videoEncodingDrift > drift) {
	m_videoEncodingDrift -= drift;
      } else {
	m_videoEncodingDrift = 0;
      }
    }
  }
  */

  if (mallocedYuvImage) free(mallocedYuvImage);
}
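VideoDstFramesToDuration() above recomputes the elapsed destination time from the frame counter. A one-line sketch of the obvious implementation, assuming a fixed per-frame duration (hypothetical signature; the real method reads the class members directly):

#include <cstdint>

typedef int64_t Duration;

// elapsed destination time for a given frame count at a fixed frame duration
Duration VideoDstFramesToDuration(uint64_t dstFrameNumber, Duration dstFrameDuration)
{
  return (Duration)(dstFrameNumber * dstFrameDuration);
}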