Example #1
void TestRunner::setAudioResult(JSContextRef context, JSValueRef data)
{
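    // WKBundleCreateWKDataFromUInt8Array returns a +1 reference; constructing
    // the WKRetainPtr with AdoptWK adopts that reference, so the WKDataRef is
    // released automatically when audioData goes out of scope.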
    WKRetainPtr<WKDataRef> audioData(AdoptWK, WKBundleCreateWKDataFromUInt8Array(InjectedBundle::shared().bundle(), context, data));
    InjectedBundle::shared().setAudioResult(audioData.get());
    m_whatToDump = Audio;
    m_dumpPixels = false;
}
Example #2
    void StreamData(int streamNum, const void* data, int numSamples, int rate, int width, int channels, float volume, int entityNum) {
        if (not initialized or (streamNum < 0 or streamNum >= N_STREAMS)) {
            return;
        }

        if (not streams[streamNum]) {
            streams[streamNum] = std::make_shared<StreamingSound>();
            if (IsValidEntity(entityNum)) {
                AddSound(GetEmitterForEntity(entityNum), streams[streamNum], 1);
            } else {
                AddSound(GetLocalEmitter(), streams[streamNum], 1);
            }
        }

        streams[streamNum]->SetGain(volume);

        AudioData audioData(rate, width, channels, (width * numSamples * channels),
                            reinterpret_cast<const char*>(data));
        AL::Buffer buffer;

        int feedError = buffer.Feed(audioData);

        if (not feedError) {
            streams[streamNum]->AppendBuffer(std::move(buffer));
        }
    }
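For context, a minimal hypothetical call site might look like the following. All names and values here are assumptions for illustration; judging by the size computation above (width * numSamples * channels), numSamples is the per-channel sample count (frames) and width is bytes per sample.

// Hypothetical usage: push one block of interleaved 16-bit stereo PCM into
// stream 0 at full volume. entityNum = -1 is assumed to fail
// IsValidEntity(), so the sound attaches to the local emitter.
static const int16_t pcm[1024 * 2] = {};  // 1024 frames x 2 channels, silence
StreamData(/*streamNum=*/0, pcm,
           /*numSamples=*/1024, /*rate=*/44100,
           /*width=*/2, /*channels=*/2,
           /*volume=*/1.0f, /*entityNum=*/-1);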
Example #3
void TestRunner::setAudioResult(JSContextRef context, JSValueRef data)
{
    auto& injectedBundle = InjectedBundle::singleton();
    // FIXME (123058): Use a JSC API to get buffer contents once such is exposed.
    WKRetainPtr<WKDataRef> audioData(AdoptWK, WKBundleCreateWKDataFromUInt8Array(injectedBundle.bundle(), context, data));
    injectedBundle.setAudioResult(audioData.get());
    m_whatToDump = Audio;
    m_dumpPixels = false;
}
Example #4
bool SpectralAnalysisTest::performTest(int nSamples, int windowSize)
{
    cout << "Testing ceilPowerOfTwo:" << endl;
    unsigned int r1 = AudioData::ceilPowerOfTwo(257);
    unsigned int r2 = AudioData::ceilPowerOfTwo(256);
    cout << "ceilPowerOfTwo(257) = " << r1 << endl;
    cout << "ceilPowerOfTwo(256) = " << r2 << endl;
    if (r1 != 512 || r2 != 256)
        return false;

    const double overlap = 0.5;
    double* data = new double[nSamples];
    const double pi = 4.0 * atan(1.0);

    srand((unsigned int) time(0));

    cout << "Original data:" << endl;
    for (int i = 0; i < nSamples; i++) {
        data[i] = sin(pi * i / (nSamples - 1));
        cout << data[i] << " ";
    }
    cout << endl << endl;

    AudioData audioData(vector<double*>(1, data), nSamples, 1000);

    // The test suite is actually single-threaded, but to be 100% sure we take
    // the mutex here. We don't use the BasicApplication method since this is
    // a component test for AudioData.
    fftwMutex.lock();

    pair<Matrix*, Matrix*> spectrogram =
        audioData.computeSpectrogram(SqHannFunction, windowSize, overlap, 0);
    auto_ptr<Matrix> pAmplitudeMatrix(spectrogram.first);
    auto_ptr<Matrix> pPhaseMatrix(spectrogram.second);
    
    cout << "Amplitude spectrogram:" << endl;
    cout << *pAmplitudeMatrix << endl;
    
    cout << "Phase spectrogram:" << endl;
    cout << *pPhaseMatrix << endl;

    auto_ptr<AudioData> pAudioData2(AudioData::fromSpectrogram(*pAmplitudeMatrix, *pPhaseMatrix,
        SqHannFunction, windowSize, overlap, 1000));

    fftwMutex.unlock();

    cout << "Result data:" << endl;
    const double* data2 = pAudioData2->getChannel(0);
    for (unsigned int i = 0; i < pAudioData2->nrOfSamples(); i++) {
        cout << data2[i] << " ";
    }
    cout << endl;

    return true;
}
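The assertions above pin down the expected behaviour of AudioData::ceilPowerOfTwo: exact powers of two map to themselves and anything else rounds up. A minimal sketch consistent with those assertions (not necessarily the library's actual implementation):

// Round n up to the next power of two; exact powers map to themselves,
// so ceilPowerOfTwo(257) == 512 and ceilPowerOfTwo(256) == 256.
unsigned int ceilPowerOfTwo(unsigned int n)
{
    unsigned int p = 1;
    while (p < n)
        p <<= 1;
    return p;
}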
Example #5
static void serverHandleCallback(void* data)
{
  if (data)
    {
      AudioData audioData(PortAudio::SAMPLE_RATE, PortAudio::CHANNEL_COUNT, RECORD_TIME);
      audioData.setData(static_cast<AudioData::Sample*>(data));
      IAudioIO* audioIO = new PortAudio;
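      // Note: audioIO is heap-allocated and never deleted in this callback;
      // unless startPlay() takes ownership, the PortAudio instance leaks.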
      audioIO->startPlay(audioData);
    }
}
Example #6
void main_loop(KeyboardInput &keyboardInput)
{
	bool quit = false;
	AudioData audioData(cmdline_opt.audio_card);

	while (!quit)
	{
		int ch = keyboardInput.ReadKey();

		if (ch != ERR)
		{
			std::cout << "ch: " << ch << std::endl;
			
			std::string state_str;
			state_str = keyboardInput.KeyPressedAction(ch);

			pthread_mutex_lock(&rvopt_mutex);

			if (quit || rvopt.quit)
			{
				quit = true;
				rvopt.quit = true;
			}

			//Volume change?
			if (rvopt.volume != -1)
			{
				audioData.SetVolume(rvopt.volume);
			}

			pthread_mutex_unlock(&rvopt_mutex);

			//Speak state_str?
			if ((cmdline_opt.speak) && (state_str != ""))
			{
				if (!audioData.Speak(state_str))
				{
					std::cerr << "Error calling Speak(). Use verbose mode for more info." << std::endl;
				}
			}

		}
	}
}
Example #7
void
AudioCallbackAdapter::Decoded(const nsTArray<int16_t>& aPCM, uint64_t aTimeStamp, uint32_t aChannels, uint32_t aRate)
{
  MOZ_ASSERT(IsOnGMPThread());

  if (aRate == 0 || aChannels == 0) {
    NS_WARNING("Invalid rate or num channels returned on GMP audio samples");
    mCallback->Error();
    return;
  }

  size_t numFrames = aPCM.Length() / aChannels;
  MOZ_ASSERT((aPCM.Length() % aChannels) == 0);
  nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[aPCM.Length()]);

  for (size_t i = 0; i < aPCM.Length(); ++i) {
    audioData[i] = AudioSampleToFloat(aPCM[i]);
  }

  if (mMustRecaptureAudioPosition) {
    mAudioFrameSum = 0;
    auto timestamp = UsecsToFrames(aTimeStamp, aRate);
    if (!timestamp.isValid()) {
      NS_WARNING("Invalid timestamp");
      mCallback->Error();
      return;
    }
    mAudioFrameOffset = timestamp.value();
    MOZ_ASSERT(mAudioFrameOffset >= 0);
    mMustRecaptureAudioPosition = false;
  }

  auto timestamp = FramesToUsecs(mAudioFrameOffset + mAudioFrameSum, aRate);
  if (!timestamp.isValid()) {
    NS_WARNING("Invalid timestamp on audio samples");
    mCallback->Error();
    return;
  }
  mAudioFrameSum += numFrames;

  auto duration = FramesToUsecs(numFrames, aRate);
  if (!duration.isValid()) {
    NS_WARNING("Invalid duration on audio samples");
    mCallback->Error();
    return;
  }

  nsRefPtr<AudioData> audio(new AudioData(mLastStreamOffset,
                                          timestamp.value(),
                                          duration.value(),
                                          numFrames,
                                          audioData.forget(),
                                          aChannels,
                                          aRate));

#ifdef LOG_SAMPLE_DECODE
  LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",
      timestamp, duration, currentLength);
#endif

  mCallback->Output(audio);
}
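Several of these examples convert 16-bit PCM to floating point with AudioSampleToFloat. A plausible minimal definition, assuming the conventional scale factor (the real Gecko helper may differ in detail):

// Map a signed 16-bit PCM sample onto the range [-1.0, 1.0).
static inline float AudioSampleToFloat(int16_t aValue)
{
  return aValue / 32768.0f;
}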
Example #8
Status MovieExporter::assembleAudio( const Object* obj,
                                     QString ffmpegPath,
                                     std::function<void( float )> progress )
{
    // Quicktime assemble call
    int startFrame = mDesc.startFrame;
    int endFrame = mDesc.endFrame;
    int fps = mDesc.fps;

    Q_ASSERT( startFrame >= 0 );
    Q_ASSERT( endFrame > startFrame );

    float lengthInSec = ( endFrame - startFrame + 1 ) / (float)fps;
    qDebug() << "Audio Length = " << lengthInSec << " seconds";

    int32_t audioDataSize = 44100 * 2 * 2 * lengthInSec;

    std::vector<int16_t> audioData( audioDataSize / sizeof( int16_t ) );

    bool audioDataValid = false;

    QDir dir( mTempWorkDir );
    Q_ASSERT( dir.exists() );

    QString tempAudioPath = mTempWorkDir + "/tmpaudio0.wav";
    qDebug() << "TempAudio=" << tempAudioPath;

    std::vector< SoundClip* > allSoundClips;

    std::vector< LayerSound* > allSoundLayers = obj->getLayersByType<LayerSound>();
    for ( LayerSound* layer : allSoundLayers )
    {
        layer->foreachKeyFrame( [&allSoundClips]( KeyFrame* key )
        {
            allSoundClips.push_back( static_cast<SoundClip*>( key ) );
        } );
    }

    int clipCount = 0;

    for ( SoundClip* clip : allSoundClips )
    {
        if ( mCanceled )
        {
            return Status::CANCELED;
        }

        // convert audio file: 44100Hz sampling rate, stereo, signed 16 bit little endian
        // supported audio file types: wav, mp3, ogg... ( all file types supported by ffmpeg )
        QString strCmd;
        strCmd += QString("\"%1\"").arg( ffmpegPath );
        strCmd += QString( " -i \"%1\" " ).arg( clip->fileName() );
        strCmd += "-ar 44100 -acodec pcm_s16le -ac 2 -y ";
        strCmd += QString( "\"%1\"" ).arg( tempAudioPath );

        executeFFMpegCommand( strCmd );
        qDebug() << "audio file: " + tempAudioPath;

        // Read wav file header
        WavFileHeader header;
        QFile file( tempAudioPath );
        file.open( QIODevice::ReadOnly );
        file.read( (char*)&header, sizeof( WavFileHeader ) );

        skipUselessChucks( header, file );

        int32_t audioSize = header.dataSize;

        qDebug() << "audio len " << audioSize;

        // Before allocating we should sanity-check that audioSize is below
        // some credible maximum.
        std::vector< int16_t > data( audioSize / sizeof( int16_t ) );
        file.read( (char*)data.data(), audioSize );
        audioDataValid = true;

        float fframe = (float)clip->pos() / (float)fps;
        int delta = fframe * 44100 * 2;
        qDebug() << "audio delta " << delta;

        int indexMax = std::min( audioSize / 2, audioDataSize / 2 - delta );

        // audio 'mixing': overlapping clips are summed with saturation
        // (safeSumInt16) into the output buffer
        for ( int i = 0; i < indexMax; i++ )
        {
            audioData[ i + delta ] = safeSumInt16( audioData[ i + delta ], data[ i ] );
        }

        file.close();

        float p = ( (float)clipCount / allSoundClips.size() );
        progress( p * 0.1f );
        clipCount++;
    }

    if ( !audioDataValid )
    {
        return Status::SAFE;
    }

    // save mixed audio file ( will be used as audio stream )
    QFile file( mTempWorkDir + "/tmpaudio.wav" );
    file.open( QIODevice::WriteOnly );

    WavFileHeader outputHeader;
    outputHeader.InitWithDefaultValues();
    outputHeader.dataSize = audioDataSize;
    outputHeader.chuckSize = 36 + audioDataSize;

    file.write( (char*)&outputHeader, sizeof( outputHeader ) );
    file.write( (char*)audioData.data(), audioDataSize );
    file.close();

    return Status::OK;
}
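The mixing loop above relies on safeSumInt16 to combine overlapping clips without integer wrap-around. A saturating-add sketch with that contract (the project's actual helper may differ):

#include <algorithm>
#include <cstdint>

// Add two 16-bit samples, clamping to the int16_t range instead of letting
// the sum wrap around (wrap-around would produce an audible click).
int16_t safeSumInt16( int16_t a, int16_t b )
{
    int32_t sum = int32_t( a ) + int32_t( b );
    sum = std::max<int32_t>( INT16_MIN, std::min<int32_t>( INT16_MAX, sum ) );
    return static_cast<int16_t>( sum );
}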
Example #9
HRESULT
WMFAudioMFTManager::Output(int64_t aStreamOffset,
                           nsRefPtr<MediaData>& aOutData)
{
  aOutData = nullptr;
  RefPtr<IMFSample> sample;
  HRESULT hr;
  int typeChangeCount = 0;
  while (true) {
    hr = mDecoder->Output(&sample);
    if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
      return hr;
    }
    if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
      hr = UpdateOutputType();
      NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
      // Catch infinite loops, but some decoders perform at least 2 stream
      // changes on consecutive calls, so be permissive.
      // 100 is arbitrarily > 2.
      NS_ENSURE_TRUE(typeChangeCount < 100, MF_E_TRANSFORM_STREAM_CHANGE);
      ++typeChangeCount;
      continue;
    }
    break;
  }

  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  RefPtr<IMFMediaBuffer> buffer;
  hr = sample->ConvertToContiguousBuffer(byRef(buffer));
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  BYTE* data = nullptr; // Note: *data will be owned by the IMFMediaBuffer, we don't need to free it.
  DWORD maxLength = 0, currentLength = 0;
  hr = buffer->Lock(&data, &maxLength, &currentLength);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  // Sometimes when starting decoding, the AAC decoder gives us samples
  // with a negative timestamp. AAC usually has preroll (or encoder
  // delay) encoded into its bitstream, but the amount encoded in the stream
  // is variable, and it's not signalled in-bitstream. There is sometimes
  // signalling in the MP4 container of what the preroll amount is, but it's
  // inconsistent. It looks like WMF's AAC decoder may take this into
  // account, so strip off samples with a negative timestamp to get us
  // to a 0-timestamp start. This seems to maintain A/V sync, so we can run
  // with this until someone complains...

  // We calculate the timestamp and the duration based on the number of audio
  // frames we've already played. We don't trust the timestamp stored on the
  // IMFSample, as sometimes it's wrong, possibly due to buggy encoders?

  // If this sample block comes after a discontinuity (i.e. a gap or seek)
  // reset the frame counters, and capture the timestamp. Future timestamps
  // will be offset from this block's timestamp.
  UINT32 discontinuity = false;
  sample->GetUINT32(MFSampleExtension_Discontinuity, &discontinuity);
  if (mMustRecaptureAudioPosition || discontinuity) {
    // Update the output type, in case this segment has a different
    // rate. This also triggers on the first sample, which can have a
    // different rate than is advertised in the container, and sometimes we
    // don't get a MF_E_TRANSFORM_STREAM_CHANGE when the rate changes.
    hr = UpdateOutputType();
    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

    mAudioFrameSum = 0;
    LONGLONG timestampHns = 0;
    hr = sample->GetSampleTime(&timestampHns);
    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
    mAudioTimeOffset = media::TimeUnit::FromMicroseconds(timestampHns / 10);
    mMustRecaptureAudioPosition = false;
  }
  // We can assume PCM 16 output.
  int32_t numSamples = currentLength / 2;
  int32_t numFrames = numSamples / mAudioChannels;
  MOZ_ASSERT(numFrames >= 0);
  MOZ_ASSERT(numSamples >= 0);
  if (numFrames == 0) {
    // All data from this chunk stripped, loop back and try to output the next
    // frame, if possible.
    return S_OK;
  }

  nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[numSamples]);

  int16_t* pcm = (int16_t*)data;
  for (int32_t i = 0; i < numSamples; ++i) {
    audioData[i] = AudioSampleToFloat(pcm[i]);
  }

  buffer->Unlock();

  media::TimeUnit timestamp =
    mAudioTimeOffset + FramesToTimeUnit(mAudioFrameSum, mAudioRate);
  NS_ENSURE_TRUE(timestamp.IsValid(), E_FAIL);

  mAudioFrameSum += numFrames;

  media::TimeUnit duration = FramesToTimeUnit(numFrames, mAudioRate);
  NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);

  aOutData = new AudioData(aStreamOffset,
                           timestamp.ToMicroseconds(),
                           duration.ToMicroseconds(),
                           numFrames,
                           audioData.forget(),
                           mAudioChannels,
                           mAudioRate);

  #ifdef LOG_SAMPLE_DECODE
  LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",
      timestamp.ToMicroseconds(), duration.ToMicroseconds(), currentLength);
  #endif

  return S_OK;
}
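Both WMF examples derive timing from a running frame counter rather than trusting the timestamp on the IMFSample. The conversion is simply frames-to-microseconds at the advertised rate; as a sketch (the real FramesToUsecs / FramesToTimeUnit helpers use checked arithmetic to reject overflow, and the name below is hypothetical):

// frames -> microseconds at the given sample rate. Assumes the product
// fits in int64_t; the real helpers return a checked, validatable value.
int64_t FramesToUsecsSketch(int64_t aFrames, uint32_t aRate)
{
  return aFrames * 1000000 / aRate;
}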
Example #10
nsresult
SeekTask::DropAudioUpToSeekTarget(MediaData* aSample)
{
  AssertOwnerThread();
  RefPtr<AudioData> audio(aSample->As<AudioData>());
  MOZ_ASSERT(audio && mSeekJob.Exists() && mSeekJob.mTarget.IsAccurate());

  CheckedInt64 sampleDuration = FramesToUsecs(audio->mFrames, mAudioRate);
  if (!sampleDuration.isValid()) {
    return NS_ERROR_FAILURE;
  }

  if (audio->mTime + sampleDuration.value() <= mSeekJob.mTarget.GetTime().ToMicroseconds()) {
    // Our seek target lies after the frames in this AudioData. Don't
    // push it onto the audio queue, and keep decoding forwards.
    return NS_OK;
  }

  if (audio->mTime > mSeekJob.mTarget.GetTime().ToMicroseconds()) {
    // The seek target doesn't lie in the audio block just after the last
    // audio frames we've seen which were before the seek target. This
    // could have been the first audio data we've seen after seek, i.e. the
    // seek terminated after the seek target in the audio stream. Just
    // abort the audio decode-to-target, the state machine will play
    // silence to cover the gap. Typically this happens in poorly muxed
    // files.
    DECODER_WARN("Audio not synced after seek, maybe a poorly muxed file?");
    mSeekedAudioData = audio;
    return NS_OK;
  }

  // The seek target lies somewhere in this AudioData's frames, strip off
  // any frames which lie before the seek target, so we'll begin playback
  // exactly at the seek target.
  NS_ASSERTION(mSeekJob.mTarget.GetTime().ToMicroseconds() >= audio->mTime,
               "Target must at or be after data start.");
  NS_ASSERTION(mSeekJob.mTarget.GetTime().ToMicroseconds() < audio->mTime + sampleDuration.value(),
               "Data must end after target.");

  CheckedInt64 framesToPrune =
    UsecsToFrames(mSeekJob.mTarget.GetTime().ToMicroseconds() - audio->mTime, mAudioRate);
  if (!framesToPrune.isValid()) {
    return NS_ERROR_FAILURE;
  }
  if (framesToPrune.value() > audio->mFrames) {
    // We've messed up somehow. Don't try to trim frames, the |frames|
    // variable below will overflow.
    DECODER_WARN("Can't prune more frames that we have!");
    return NS_ERROR_FAILURE;
  }
  uint32_t frames = audio->mFrames - static_cast<uint32_t>(framesToPrune.value());
  uint32_t channels = audio->mChannels;
  AlignedAudioBuffer audioData(frames * channels);
  if (!audioData) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  memcpy(audioData.get(),
         audio->mAudioData.get() + (framesToPrune.value() * channels),
         frames * channels * sizeof(AudioDataValue));
  CheckedInt64 duration = FramesToUsecs(frames, mAudioRate);
  if (!duration.isValid()) {
    return NS_ERROR_FAILURE;
  }
  RefPtr<AudioData> data(new AudioData(audio->mOffset,
                                       mSeekJob.mTarget.GetTime().ToMicroseconds(),
                                       duration.value(),
                                       frames,
                                       Move(audioData),
                                       channels,
                                       audio->mRate));
  MOZ_ASSERT(!mSeekedAudioData, "Should be the 1st sample after seeking");
  mSeekedAudioData = data;

  return NS_OK;
}
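The prune above skips framesToPrune * channels interleaved AudioDataValues from the front of the source buffer. A worked sketch of the arithmetic, with hypothetical numbers (48 kHz stereo, seek target 2000 us past the block start):

// UsecsToFrames without the overflow checking: frames = us * rate / 1e6.
int64_t framesToPruneSketch(int64_t aGapUs, uint32_t aRate)
{
  return aGapUs * aRate / 1000000;  // 2000 us at 48 kHz -> 96 frames
}
// With 2 channels, the memcpy source offset is 96 * 2 = 192 AudioDataValues.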
Example #11
HRESULT
WMFAudioMFTManager::Output(int64_t aStreamOffset,
                           nsAutoPtr<MediaData>& aOutData)
{
  aOutData = nullptr;
  RefPtr<IMFSample> sample;
  HRESULT hr;
  while (true) {
    hr = mDecoder->Output(&sample);
    if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
      return hr;
    }
    if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
      hr = UpdateOutputType();
      NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
      continue;
    }
    break;
  }

  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  RefPtr<IMFMediaBuffer> buffer;
  hr = sample->ConvertToContiguousBuffer(byRef(buffer));
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  BYTE* data = nullptr; // Note: *data will be owned by the IMFMediaBuffer, we don't need to free it.
  DWORD maxLength = 0, currentLength = 0;
  hr = buffer->Lock(&data, &maxLength, &currentLength);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  // Sometimes when starting decoding, the AAC decoder gives us samples
  // with a negative timestamp. AAC usually has preroll (or encoder
  // delay) encoded into its bitstream, but the amount encoded in the stream
  // is variable, and it's not signalled in-bitstream. There is sometimes
  // signalling in the MP4 container of what the preroll amount is, but it's
  // inconsistent. It looks like WMF's AAC decoder may take this into
  // account, so strip off samples with a negative timestamp to get us
  // to a 0-timestamp start. This seems to maintain A/V sync, so we can run
  // with this until someone complains...

  // We calculate the timestamp and the duration based on the number of audio
  // frames we've already played. We don't trust the timestamp stored on the
  // IMFSample, as sometimes it's wrong, possibly due to buggy encoders?

  // If this sample block comes after a discontinuity (i.e. a gap or seek)
  // reset the frame counters, and capture the timestamp. Future timestamps
  // will be offset from this block's timestamp.
  UINT32 discontinuity = false;
  int32_t numFramesToStrip = 0;
  sample->GetUINT32(MFSampleExtension_Discontinuity, &discontinuity);
  if (mMustRecaptureAudioPosition || discontinuity) {
    // Update the output type, in case this segment has a different
    // rate. This also triggers on the first sample, which can have a
    // different rate than is advertised in the container, and sometimes we
    // don't get a MF_E_TRANSFORM_STREAM_CHANGE when the rate changes.
    hr = UpdateOutputType();
    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

    mAudioFrameSum = 0;
    LONGLONG timestampHns = 0;
    hr = sample->GetSampleTime(&timestampHns);
    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
    hr = HNsToFrames(timestampHns, mAudioRate, &mAudioFrameOffset);
    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
    if (mAudioFrameOffset < 0) {
      // First sample has a negative timestamp. Strip off the samples until
      // we reach positive territory.
      numFramesToStrip = -mAudioFrameOffset;
      mAudioFrameOffset = 0;
    }
    mMustRecaptureAudioPosition = false;
  }
  MOZ_ASSERT(numFramesToStrip >= 0);
  int32_t numSamples = currentLength / mAudioBytesPerSample;
  int32_t numFrames = numSamples / mAudioChannels;
  int32_t offset = std::min<int32_t>(numFramesToStrip, numFrames);
  numFrames -= offset;
  numSamples -= offset * mAudioChannels;
  MOZ_ASSERT(numFrames >= 0);
  MOZ_ASSERT(numSamples >= 0);
  if (numFrames == 0) {
    // All data from this chunk stripped, loop back and try to output the next
    // frame, if possible.
    return S_OK;
  }

  nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[numSamples]);

  // Just assume PCM output for now...
  MOZ_ASSERT(mAudioBytesPerSample == 2);
  int16_t* pcm = ((int16_t*)data) + (offset * mAudioChannels);
  MOZ_ASSERT(pcm >= (int16_t*)data);
  MOZ_ASSERT(pcm <= (int16_t*)(data + currentLength));
  MOZ_ASSERT(pcm+numSamples <= (int16_t*)(data + currentLength));
  for (int32_t i = 0; i < numSamples; ++i) {
    audioData[i] = AudioSampleToFloat(pcm[i]);
  }

  buffer->Unlock();
  int64_t timestamp;
  hr = FramesToUsecs(mAudioFrameOffset + mAudioFrameSum, mAudioRate, &timestamp);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  mAudioFrameSum += numFrames;

  int64_t duration;
  hr = FramesToUsecs(numFrames, mAudioRate, &duration);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  aOutData = new AudioData(aStreamOffset,
                           timestamp,
                           duration,
                           numFrames,
                           audioData.forget(),
                           mAudioChannels,
                           mAudioRate);

  #ifdef LOG_SAMPLE_DECODE
  LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",
      timestamp, duration, currentLength);
  #endif

  return S_OK;
}
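Example #11 first converts the sample time from WMF's native 100-nanosecond units (HNs) into frames, then works in frames throughout. The conversion, sketched without the checked arithmetic the real HNsToFrames presumably performs (sketch name is hypothetical):

// 1 second = 10,000,000 hundred-nanosecond units, so
// frames = hns * rate / 10^7. Assumes the product fits in int64_t.
int64_t HNsToFramesSketch(int64_t aHNs, uint32_t aRate)
{
  return aHNs * aRate / 10000000;
}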
Example #12
nsresult MediaDecoderReader::DecodeToTarget(int64_t aTarget)
{
  DECODER_LOG(PR_LOG_DEBUG, ("MediaDecoderReader::DecodeToTarget(%lld) Begin", aTarget));

  // Decode forward to the target frame. Start with video, if we have it.
  if (HasVideo()) {
    bool eof = false;
    int64_t startTime = -1;
    nsAutoPtr<VideoData> video;
    while (HasVideo() && !eof) {
      while (VideoQueue().GetSize() == 0 && !eof) {
        bool skip = false;
        eof = !DecodeVideoFrame(skip, 0);
        {
          ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
          if (mDecoder->IsShutdown()) {
            return NS_ERROR_FAILURE;
          }
        }
      }
      if (VideoQueue().GetSize() == 0) {
        // Hit end of file, we want to display the last frame of the video.
        if (video) {
          VideoQueue().PushFront(video.forget());
        }
        break;
      }
      video = VideoQueue().PeekFront();
      // If the frame end time is less than the seek target, we won't want
      // to display this frame after the seek, so discard it.
      if (video && video->GetEndTime() <= aTarget) {
        if (startTime == -1) {
          startTime = video->mTime;
        }
        VideoQueue().PopFront();
      } else {
        video.forget();
        break;
      }
    }
    {
      ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
      if (mDecoder->IsShutdown()) {
        return NS_ERROR_FAILURE;
      }
    }
    DECODER_LOG(PR_LOG_DEBUG, ("First video frame after decode is %lld", startTime));
  }

  if (HasAudio()) {
    // Decode audio forward to the seek target.
    bool eof = false;
    while (HasAudio() && !eof) {
      while (!eof && AudioQueue().GetSize() == 0) {
        eof = !DecodeAudioData();
        {
          ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
          if (mDecoder->IsShutdown()) {
            return NS_ERROR_FAILURE;
          }
        }
      }
      const AudioData* audio = AudioQueue().PeekFront();
      if (!audio)
        break;
      CheckedInt64 startFrame = UsecsToFrames(audio->mTime, mInfo.mAudio.mRate);
      CheckedInt64 targetFrame = UsecsToFrames(aTarget, mInfo.mAudio.mRate);
      if (!startFrame.isValid() || !targetFrame.isValid()) {
        return NS_ERROR_FAILURE;
      }
      if (startFrame.value() + audio->mFrames <= targetFrame.value()) {
        // Our seek target lies after the frames in this AudioData. Pop it
        // off the queue, and keep decoding forwards.
        delete AudioQueue().PopFront();
        audio = nullptr;
        continue;
      }
      if (startFrame.value() > targetFrame.value()) {
        // The seek target doesn't lie in the audio block just after the last
        // audio frames we've seen which were before the seek target. This
        // could have been the first audio data we've seen after seek, i.e. the
        // seek terminated after the seek target in the audio stream. Just
        // abort the audio decode-to-target, the state machine will play
        // silence to cover the gap. Typically this happens in poorly muxed
        // files.
        NS_WARNING("Audio not synced after seek, maybe a poorly muxed file?");
        break;
      }

      // The seek target lies somewhere in this AudioData's frames, strip off
      // any frames which lie before the seek target, so we'll begin playback
      // exactly at the seek target.
      NS_ASSERTION(targetFrame.value() >= startFrame.value(),
                   "Target must at or be after data start.");
      NS_ASSERTION(targetFrame.value() < startFrame.value() + audio->mFrames,
                   "Data must end after target.");

      int64_t framesToPrune = targetFrame.value() - startFrame.value();
      if (framesToPrune > audio->mFrames) {
        // We've messed up somehow. Don't try to trim frames, the |frames|
        // variable below will overflow.
        NS_WARNING("Can't prune more frames that we have!");
        break;
      }
      uint32_t frames = audio->mFrames - static_cast<uint32_t>(framesToPrune);
      uint32_t channels = audio->mChannels;
      nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[frames * channels]);
      memcpy(audioData.get(),
             audio->mAudioData.get() + (framesToPrune * channels),
             frames * channels * sizeof(AudioDataValue));
      CheckedInt64 duration = FramesToUsecs(frames, mInfo.mAudio.mRate);
      if (!duration.isValid()) {
        return NS_ERROR_FAILURE;
      }
      nsAutoPtr<AudioData> data(new AudioData(audio->mOffset,
                                              aTarget,
                                              duration.value(),
                                              frames,
                                              audioData.forget(),
                                              channels));
      delete AudioQueue().PopFront();
      AudioQueue().PushFront(data.forget());
      break;
    }
  }

  DECODER_LOG(PR_LOG_DEBUG, ("MediaDecoderReader::DecodeToTarget(%lld) End", aTarget));

  return NS_OK;
}