Example #1
template <typename T>
static void
CopyChunkToBlock(AudioChunk& aInput, AudioBlock* aBlock,
                 uint32_t aOffsetInBlock)
{
  uint32_t blockChannels = aBlock->ChannelCount();
  AutoTArray<const T*,2> channels;
  if (aInput.IsNull()) {
    channels.SetLength(blockChannels);
    PodZero(channels.Elements(), blockChannels);
  } else {
    const nsTArray<const T*>& inputChannels = aInput.ChannelData<T>();
    channels.SetLength(inputChannels.Length());
    PodCopy(channels.Elements(), inputChannels.Elements(), channels.Length());
    if (channels.Length() != blockChannels) {
      // We only need to upmix here because aBlock's channel count has been
      // chosen to be a superset of the channel count of every chunk.
      AudioChannelsUpMix(&channels, blockChannels, static_cast<T*>(nullptr));
    }
  }

  for (uint32_t c = 0; c < blockChannels; ++c) {
    float* outputData = aBlock->ChannelFloatsForWrite(c) + aOffsetInBlock;
    if (channels[c]) {
      ConvertAudioSamplesWithScale(channels[c], outputData, aInput.GetDuration(), aInput.mVolume);
    } else {
      PodZero(outputData, aInput.GetDuration());
    }
  }
}
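
All of the examples on this page exercise mozilla::PodCopy from
"mozilla/PodOperations.h". As a quick orientation, here is a minimal sketch of
its behavior, assuming only that T is a plain-old-data type (the real
implementation also asserts that the source and destination ranges do not
overlap):

#include <string.h>

template <typename T>
static void
PodCopySketch(T* aDst, const T* aSrc, size_t aNElem)
{
  // Copy aNElem elements of POD type T; no constructors or assignment
  // operators run, so this is byte-for-byte equivalent to memcpy.
  memcpy(aDst, aSrc, aNElem * sizeof(T));
}
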
Example #2
void
AnalyserNode::AppendChunk(const AudioChunk& aChunk)
{
  const uint32_t bufferSize = mBuffer.Length();
  const uint32_t channelCount = aChunk.mChannelData.Length();
  uint32_t chunkDuration = aChunk.mDuration;
  MOZ_ASSERT((bufferSize & (bufferSize - 1)) == 0); // Must be a power of two!
  MOZ_ASSERT(channelCount > 0);
  MOZ_ASSERT(chunkDuration == WEBAUDIO_BLOCK_SIZE);

  if (chunkDuration > bufferSize) {
    // Copy at most bufferSize samples.
    chunkDuration = bufferSize;
  }

  PodCopy(mBuffer.Elements() + mWriteIndex, static_cast<const float*>(aChunk.mChannelData[0]), chunkDuration);
  for (uint32_t i = 1; i < channelCount; ++i) {
    AudioBlockAddChannelWithScale(static_cast<const float*>(aChunk.mChannelData[i]), 1.0f,
                                  mBuffer.Elements() + mWriteIndex);
  }
  if (channelCount > 1) {
    AudioBlockInPlaceScale(mBuffer.Elements() + mWriteIndex,
                           1.0f / aChunk.mChannelData.Length());
  }
  mWriteIndex += chunkDuration;
  MOZ_ASSERT(mWriteIndex <= bufferSize);
  if (mWriteIndex >= bufferSize) {
    mWriteIndex = 0;
  }
}
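
AppendChunk mixes the chunk down to mono: it copies channel 0, accumulates the
remaining channels on top, then scales by 1 / channelCount. A stand-alone
sketch of that averaging, with illustrative names rather than the real
AudioBlock helpers:

#include <stddef.h>
#include <stdint.h>

static void
DownmixAverage(const float* const* aChannels, uint32_t aChannelCount,
               float* aOut, size_t aFrames)
{
  for (size_t i = 0; i < aFrames; ++i) {
    float sum = 0.0f;
    for (uint32_t c = 0; c < aChannelCount; ++c) {
      sum += aChannels[c][i];
    }
    // Matches the AudioBlockInPlaceScale(1.0f / channelCount) step above.
    aOut[i] = sum / aChannelCount;
  }
}
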
Example #3
void
CDMProxy::gmp_Decrypted(uint32_t aId,
                        GMPErr aResult,
                        const nsTArray<uint8_t>& aDecryptedData)
{
  MOZ_ASSERT(IsOnGMPThread());
  for (size_t i = 0; i < mDecryptionJobs.Length(); i++) {
    DecryptJob* job = mDecryptionJobs[i];
    if (job->mId == aId) {
      if (aDecryptedData.Length() != job->mSample->size) {
        NS_WARNING("CDM returned incorrect number of decrypted bytes");
      }
      if (GMP_SUCCEEDED(aResult)) {
        PodCopy(job->mSample->data,
                aDecryptedData.Elements(),
                std::min<size_t>(aDecryptedData.Length(), job->mSample->size));
        job->mClient->Decrypted(GMPNoErr, job->mSample.forget());
      } else if (aResult == GMPNoKeyErr) {
        NS_WARNING("CDM returned GMPNoKeyErr");
        // We still have the encrypted sample, so we can re-enqueue it to be
        // decrypted again once the key is usable again.
        job->mClient->Decrypted(GMPNoKeyErr, job->mSample.forget());
      } else {
        nsAutoCString str("CDM returned decode failure GMPErr=");
        str.AppendInt(aResult);
        NS_WARNING(str.get());
        job->mClient->Decrypted(aResult, nullptr);
      }
      mDecryptionJobs.RemoveElementAt(i);
      return;
    }
  }
  NS_WARNING("GMPDecryptorChild returned incorrect job ID");
}
Example #4
void InitBrandName()
{
  if (sBrandName) {
    return;
  }
  nsXPIDLString brandName;
  nsCOMPtr<nsIStringBundleService> stringBundleService =
    mozilla::services::GetStringBundleService();
  if (stringBundleService) {
    nsCOMPtr<nsIStringBundle> brandBundle;
    nsresult rv = stringBundleService->CreateBundle(kBrandBundleURL,
                                           getter_AddRefs(brandBundle));
    if (NS_SUCCEEDED(rv)) {
      rv = brandBundle->GetStringFromName(u"brandShortName",
                                          getter_Copies(brandName));
      NS_WARNING_ASSERTION(
        NS_SUCCEEDED(rv), "Could not get the program name for a cubeb stream.");
    }
  }
  /* cubeb expects a c-string. Bind the converted string to a local so its
     buffer stays alive; taking .get() on a temporary would leave a dangling
     pointer. */
  NS_LossyConvertUTF16toASCII ascii(brandName);
  sBrandName = new char[brandName.Length() + 1];
  PodCopy(sBrandName.get(), ascii.get(), brandName.Length());
  sBrandName[brandName.Length()] = 0;
}
Example #5
void
AudioBuffer::CopyToChannel(JSContext* aJSContext, const Float32Array& aSource,
                           uint32_t aChannelNumber, uint32_t aStartInChannel,
                           ErrorResult& aRv)
{
  uint32_t length = aSource.Length();
  if (aChannelNumber >= NumberOfChannels() ||
      aStartInChannel + length >= mLength) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return;
  }

  if (!mSharedChannels && JS_GetTypedArrayLength(mJSChannels[aChannelNumber]) != mLength) {
    // The array was probably neutered
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return;
  }

  if (!RestoreJSChannelData(aJSContext)) {
    aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
    return;
  }

  PodCopy(JS_GetFloat32ArrayData(mJSChannels[aChannelNumber]) + aStartInChannel,
          aSource.Data(), length);
}
Example #6
void
AudioBuffer::SetRawChannelContents(uint32_t aChannel, float* aContents)
{
  MOZ_ASSERT(!GetWrapperPreserveColor() && !mSharedChannels,
             "The AudioBuffer object should not have been handed to JS or have C++ callers neuter its typed array");
  PodCopy(JS_GetFloat32ArrayData(mJSChannels[aChannel]), aContents, mLength);
}
Example #7
void
AudioCaptureStream::MixerCallback(AudioDataValue* aMixedBuffer,
                                  AudioSampleFormat aFormat, uint32_t aChannels,
                                  uint32_t aFrames, uint32_t aSampleRate)
{
  AutoTArray<nsTArray<AudioDataValue>, MONO> output;
  AutoTArray<const AudioDataValue*, MONO> bufferPtrs;
  output.SetLength(MONO);
  bufferPtrs.SetLength(MONO);

  uint32_t written = 0;
  // We need to copy here because the mixer will reuse the storage; we should
  // not hold onto it. Buffers are in planar format.
  for (uint32_t channel = 0; channel < aChannels; channel++) {
    AudioDataValue* out = output[channel].AppendElements(aFrames);
    PodCopy(out, aMixedBuffer + written, aFrames);
    bufferPtrs[channel] = out;
    written += aFrames;
  }
  AudioChunk chunk;
  chunk.mBuffer = new mozilla::SharedChannelArrayBuffer<AudioDataValue>(&output);
  chunk.mDuration = aFrames;
  chunk.mBufferFormat = aFormat;
  chunk.mVolume = 1.0f;
  chunk.mChannelData.SetLength(MONO);
  for (uint32_t channel = 0; channel < aChannels; channel++) {
    chunk.mChannelData[channel] = bufferPtrs[channel];
  }

  // Now we have mixed data; simply append it to our track.
  EnsureTrack(mTrackId)->Get<AudioSegment>()->AppendAndConsumeChunk(&chunk);
}
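
The written += aFrames bookkeeping works because the mixer hands over planar
data: all frames of channel 0, then all frames of channel 1, and so on. In
index terms (illustrative):

// Planar:      sample(c, f) == aMixedBuffer[c * aFrames + f]
// Interleaved: sample(c, f) would be buffer[f * aChannels + c]
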
Example #8
/**
 * aSrcRect: Rect relative to the aSrc surface
 * aDestPoint: Point inside aDest surface
 */
void
CopyRect(DataSourceSurface* aSrc, DataSourceSurface* aDest,
         IntRect aSrcRect, IntPoint aDestPoint)
{
  if (aSrcRect.Overflows() ||
      IntRect(aDestPoint, aSrcRect.Size()).Overflows()) {
    MOZ_CRASH("we should never be getting invalid rects at this point");
  }

  MOZ_RELEASE_ASSERT(aSrc->GetFormat() == aDest->GetFormat(),
                     "different surface formats");
  MOZ_RELEASE_ASSERT(IntRect(IntPoint(), aSrc->GetSize()).Contains(aSrcRect),
                     "source rect too big for source surface");
  MOZ_RELEASE_ASSERT(IntRect(IntPoint(), aDest->GetSize()).Contains(IntRect(aDestPoint, aSrcRect.Size())),
                     "dest surface too small");

  if (aSrcRect.IsEmpty()) {
    return;
  }

  DataSourceSurface::ScopedMap srcMap(aSrc, DataSourceSurface::READ);
  DataSourceSurface::ScopedMap destMap(aDest, DataSourceSurface::WRITE);
  if (MOZ2D_WARN_IF(!srcMap.IsMapped() || !destMap.IsMapped())) {
    return;
  }

  uint8_t* sourceData = DataAtOffset(aSrc, srcMap.GetMappedSurface(), aSrcRect.TopLeft());
  uint32_t sourceStride = srcMap.GetStride();
  uint8_t* destData = DataAtOffset(aDest, destMap.GetMappedSurface(), aDestPoint);
  uint32_t destStride = destMap.GetStride();

  if (BytesPerPixel(aSrc->GetFormat()) == 4) {
    for (int32_t y = 0; y < aSrcRect.height; y++) {
      PodCopy((int32_t*)destData, (int32_t*)sourceData, aSrcRect.width);
      sourceData += sourceStride;
      destData += destStride;
    }
  } else if (BytesPerPixel(aSrc->GetFormat()) == 1) {
    for (int32_t y = 0; y < aSrcRect.height; y++) {
      PodCopy(destData, sourceData, aSrcRect.width);
      sourceData += sourceStride;
      destData += destStride;
    }
  }
}
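
CopyRect copies row by row rather than with one large PodCopy because a
surface's stride (the byte distance between the starts of consecutive rows)
can be larger than width * BytesPerPixel; a single copy of height * stride
bytes would also copy the row padding and read past the end of the last
mapped row. The addressing that DataAtOffset performs boils down to
(illustrative):

// byte address of pixel (x, y):
//   map.GetData() + y * map.GetStride() + x * BytesPerPixel(format)
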
Example #9
void
CopyChars(jschar *dest, const JSLinearString &str)
{
    AutoCheckCannotGC nogc;
    if (str.hasTwoByteChars())
        PodCopy(dest, str.twoByteChars(nogc), str.length());
    else
        CopyAndInflateChars(dest, str.latin1Chars(nogc), str.length());
}
Example #10
cubeb_log_message(char const str[CUBEB_LOG_MESSAGE_MAX_SIZE])
{
  size_t length = strlen(str);
  /* paranoia against malformed message */
  assert(length < CUBEB_LOG_MESSAGE_MAX_SIZE);
  if (length > CUBEB_LOG_MESSAGE_MAX_SIZE - 1) {
    return;
  }
  PodCopy(storage, str, length);
  storage[length] = '\0';
}
Example #11
void AudioBufferCopyWithScale(const float* aInput,
                              float aScale,
                              float* aOutput,
                              uint32_t aSize)
{
  if (aScale == 1.0f) {
    PodCopy(aOutput, aInput, aSize);
  } else {
    for (uint32_t i = 0; i < aSize; ++i) {
      aOutput[i] = aInput[i]*aScale;
    }
  }
}
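
A quick usage sketch: unity gain takes the PodCopy fast path, while any other
gain goes through the per-sample multiply loop.

float in[4] = { 0.1f, 0.2f, 0.3f, 0.4f };
float out[4];
AudioBufferCopyWithScale(in, 1.0f, out, 4); // PodCopy fast path
AudioBufferCopyWithScale(in, 0.5f, out, 4); // per-sample scaling
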
Example #12
  nsresult Output(BufferInfo::Param aInfo, void* aBuffer,
                  MediaFormat::Param aFormat, const TimeUnit& aDuration)
  {
    // The output on Android is always 16-bit signed
    nsresult rv;
    int32_t numChannels;
    NS_ENSURE_SUCCESS(rv =
        aFormat->GetInteger(NS_LITERAL_STRING("channel-count"), &numChannels), rv);
    AudioConfig::ChannelLayout layout(numChannels);
    if (!layout.IsValid()) {
      return NS_ERROR_FAILURE;
    }

    int32_t sampleRate;
    NS_ENSURE_SUCCESS(rv =
        aFormat->GetInteger(NS_LITERAL_STRING("sample-rate"), &sampleRate), rv);

    int32_t size;
    NS_ENSURE_SUCCESS(rv = aInfo->Size(&size), rv);

    int32_t offset;
    NS_ENSURE_SUCCESS(rv = aInfo->Offset(&offset), rv);

#ifdef MOZ_SAMPLE_TYPE_S16
    const int32_t numSamples = size / 2;
#else
#error We only support 16-bit integer PCM
#endif

    const int32_t numFrames = numSamples / numChannels;
    AlignedAudioBuffer audio(numSamples);
    if (!audio) {
      return NS_ERROR_OUT_OF_MEMORY;
    }

    const uint8_t* bufferStart = static_cast<uint8_t*>(aBuffer) + offset;
    PodCopy(audio.get(), reinterpret_cast<const AudioDataValue*>(bufferStart),
            numSamples);

    int64_t presentationTimeUs;
    NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);

    RefPtr<AudioData> data = new AudioData(0, presentationTimeUs,
                                           aDuration.ToMicroseconds(),
                                           numFrames,
                                           Move(audio),
                                           numChannels,
                                           sampleRate);
    INVOKE_CALLBACK(Output, data);
    return NS_OK;
  }
Example #13
ReverbConvolverStage::ReverbConvolverStage(const float* impulseResponse, size_t, size_t reverbTotalLatency, size_t stageOffset, size_t stageLength,
                                           size_t fftSize, size_t renderPhase, size_t renderSliceSize, ReverbAccumulationBuffer* accumulationBuffer, bool directMode)
    : m_accumulationBuffer(accumulationBuffer)
    , m_accumulationReadIndex(0)
    , m_inputReadIndex(0)
    , m_directMode(directMode)
{
    MOZ_ASSERT(impulseResponse);
    MOZ_ASSERT(accumulationBuffer);

    if (!m_directMode) {
        m_fftKernel = new FFTBlock(fftSize);
        m_fftKernel->PadAndMakeScaledDFT(impulseResponse + stageOffset, stageLength);
        m_fftConvolver = new FFTConvolver(fftSize);
    } else {
        m_directKernel.SetLength(fftSize / 2);
        PodCopy(m_directKernel.Elements(), impulseResponse + stageOffset, fftSize / 2);
        m_directConvolver = new DirectConvolver(renderSliceSize);
    }
    m_temporaryBuffer.SetLength(renderSliceSize);
    PodZero(m_temporaryBuffer.Elements(), m_temporaryBuffer.Length());

    // The convolution stage at offset stageOffset needs to have a corresponding delay to cancel out the offset.
    size_t totalDelay = stageOffset + reverbTotalLatency;

    // But, the FFT convolution itself incurs fftSize / 2 latency, so subtract this out...
    size_t halfSize = fftSize / 2;
    if (!m_directMode) {
        MOZ_ASSERT(totalDelay >= halfSize);
        if (totalDelay >= halfSize)
            totalDelay -= halfSize;
    }

    // We divide up the total delay into pre and post delay sections so that we can schedule at exactly the moment when the FFT will happen.
    // This is coordinated with the other stages, so they don't all do their FFTs at the same time...
    int maxPreDelayLength = std::min(halfSize, totalDelay);
    m_preDelayLength = totalDelay > 0 ? renderPhase % maxPreDelayLength : 0;
    if (m_preDelayLength > totalDelay)
        m_preDelayLength = 0;

    m_postDelayLength = totalDelay - m_preDelayLength;
    m_preReadWriteIndex = 0;
    m_framesProcessed = 0; // total frames processed so far

    size_t delayBufferSize = m_preDelayLength < fftSize ? fftSize : m_preDelayLength;
    delayBufferSize = delayBufferSize < renderSliceSize ? renderSliceSize : delayBufferSize;
    m_preDelayBuffer.SetLength(delayBufferSize);
    PodZero(m_preDelayBuffer.Elements(), m_preDelayBuffer.Length());
}
Example #14
void
MediaEngineWebRTCMicrophoneSource::InsertInGraph(const T* aBuffer,
                                                 size_t aFrames,
                                                 uint32_t aChannels)
{
  if (mState != kStarted) {
    return;
  }

  if (MOZ_LOG_TEST(AudioLogModule(), LogLevel::Debug)) {
    mTotalFrames += aFrames;
    if (mTotalFrames > mLastLogFrames + mSampleFrequency) { // ~ 1 second
      MOZ_LOG(AudioLogModule(), LogLevel::Debug,
              ("%p: Inserting %" PRIuSIZE " samples into graph, total frames = %" PRIu64,
               (void*)this, aFrames, mTotalFrames));
      mLastLogFrames = mTotalFrames;
    }
  }

  size_t len = mSources.Length();
  for (size_t i = 0; i < len; i++) {
    if (!mSources[i]) {
      continue;
    }
    RefPtr<SharedBuffer> buffer =
      SharedBuffer::Create(aFrames * aChannels * sizeof(T));
    PodCopy(static_cast<T*>(buffer->Data()),
            aBuffer, aFrames * aChannels);

    TimeStamp insertTime;
    // Make sure we include the stream and the track.
    // The 0:1 is a flag to note when we've done the final insert for a given input block.
    LogTime(AsyncLatencyLogger::AudioTrackInsertion,
            LATENCY_STREAM_ID(mSources[i].get(), mTrackID),
            (i+1 < len) ? 0 : 1, insertTime);

    nsAutoPtr<AudioSegment> segment(new AudioSegment());
    AutoTArray<const T*, 1> channels;
    // XXX Bug 971528 - Support stereo capture in gUM
    MOZ_ASSERT(aChannels == 1,
        "GraphDriver only supports mono audio for now");
    channels.AppendElement(static_cast<T*>(buffer->Data()));
    segment->AppendFrames(buffer.forget(), channels, aFrames,
                         mPrincipalHandles[i]);
    segment->GetStartTime(insertTime);

    mSources[i]->AppendToTrack(mTrackID, segment);
  }
}
Example #15
nsresult
MediaCodecDataDecoder::QueueSample(const MediaRawData* aSample)
{
  MOZ_ASSERT(aSample);
  AutoLocalJNIFrame frame(jni::GetEnvForThread(), 1);

  // We have a sample, try to feed it to the decoder.
  int32_t inputIndex = -1;
  nsresult res = mDecoder->DequeueInputBuffer(kDecoderTimeout, &inputIndex);
  if (NS_FAILED(res)) {
    return res;
  }

  if (inputIndex < 0) {
    // There is no valid input buffer available.
    return NS_ERROR_FAILURE;
  }

  jni::Object::LocalRef buffer(frame.GetEnv());
  res = GetInputBuffer(frame.GetEnv(), inputIndex, &buffer);
  if (NS_FAILED(res)) {
    return res;
  }

  void* directBuffer = frame.GetEnv()->GetDirectBufferAddress(buffer.Get());

  MOZ_ASSERT(frame.GetEnv()->GetDirectBufferCapacity(buffer.Get()) >=
             aSample->Size(),
             "Decoder buffer is not large enough for sample");

  PodCopy(static_cast<uint8_t*>(directBuffer), aSample->Data(), aSample->Size());

  CryptoInfo::LocalRef cryptoInfo = GetCryptoInfoFromSample(aSample);
  if (cryptoInfo) {
    res = mDecoder->QueueSecureInputBuffer(inputIndex, 0, cryptoInfo,
                                           aSample->mTime, 0);
  } else {
    res = mDecoder->QueueInputBuffer(inputIndex, 0, aSample->Size(),
                                     aSample->mTime, 0);
  }

  if (NS_FAILED(res)) {
    return res;
  }

  mDurations.push_back(TimeUnit::FromMicroseconds(aSample->mDuration));
  return NS_OK;
}
Example #16
void
ConvolverNode::SetBuffer(JSContext* aCx, AudioBuffer* aBuffer, ErrorResult& aRv)
{
    if (aBuffer) {
        switch (aBuffer->NumberOfChannels()) {
        case 1:
        case 2:
        case 4:
            // Supported number of channels
            break;
        default:
            aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
            return;
        }
    }

    mBuffer = aBuffer;

    // Send the buffer to the stream
    AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
    MOZ_ASSERT(ns, "Why don't we have a stream here?");
    if (mBuffer) {
        uint32_t length = mBuffer->Length();
        nsRefPtr<ThreadSharedFloatArrayBufferList> data =
            mBuffer->GetThreadSharedChannelsForRate(aCx);
        if (data && length < WEBAUDIO_BLOCK_SIZE) {
            // For very small impulse response buffers, we need to pad the
            // buffer with 0 to make sure that the Reverb implementation
            // has enough data to compute FFTs from.
            length = WEBAUDIO_BLOCK_SIZE;
            nsRefPtr<ThreadSharedFloatArrayBufferList> paddedBuffer =
                new ThreadSharedFloatArrayBufferList(data->GetChannels());
            float* channelData = (float*) malloc(sizeof(float) * length * data->GetChannels());
            for (uint32_t i = 0; i < data->GetChannels(); ++i) {
                PodCopy(channelData + length * i, data->GetData(i), mBuffer->Length());
                PodZero(channelData + length * i + mBuffer->Length(), WEBAUDIO_BLOCK_SIZE - mBuffer->Length());
                paddedBuffer->SetData(i, (i == 0) ? channelData : nullptr, free, channelData);
            }
            data = paddedBuffer;
        }
        SendInt32ParameterToStream(ConvolverNodeEngine::BUFFER_LENGTH, length);
        SendDoubleParameterToStream(ConvolverNodeEngine::SAMPLE_RATE,
                                    mBuffer->SampleRate());
        ns->SetBuffer(data.forget());
    } else {
        ns->SetBuffer(nullptr);
    }
}
Example #17
void
MediaEngineWebRTCMicrophoneSource::InsertInGraph(const T* aBuffer,
                                                 size_t aFrames,
                                                 uint32_t aChannels)
{
  if (mState != kStarted) {
    return;
  }

  size_t len = mSources.Length();
  for (size_t i = 0; i < len; i++) {
    if (!mSources[i]) {
      continue;
    }
    RefPtr<SharedBuffer> buffer =
      SharedBuffer::Create(aFrames * aChannels * sizeof(T));
    PodCopy(static_cast<T*>(buffer->Data()),
            aBuffer, aFrames * aChannels);

    TimeStamp insertTime;
    // Make sure we include the stream and the track.
    // The 0:1 is a flag to note when we've done the final insert for a given input block.
    LogTime(AsyncLatencyLogger::AudioTrackInsertion,
            LATENCY_STREAM_ID(mSources[i].get(), mTrackID),
            (i+1 < len) ? 0 : 1, insertTime);

    nsAutoPtr<AudioSegment> segment(new AudioSegment());
    AutoTArray<const T*, 1> channels;
    // XXX Bug 971528 - Support stereo capture in gUM
    MOZ_ASSERT(aChannels == 1,
        "GraphDriver only supports mono audio for now");
    channels.AppendElement(static_cast<T*>(buffer->Data()));
    segment->AppendFrames(buffer.forget(), channels, aFrames,
                         mPrincipalHandles[i]);
    segment->GetStartTime(insertTime);

    RUN_ON_THREAD(mThread,
                  WrapRunnable(mSources[i], &SourceMediaStream::AppendToTrack,
                               mTrackID, segment,
                               static_cast<AudioSegment*>(nullptr)),
                  NS_DISPATCH_NORMAL);
  }
}
Example #18
/**
 * Copies the data in aInput to aOffsetInBlock within aBlock.
 * aBlock must have been allocated with AllocateInputBlock and have a channel
 * count that's a superset of the channels in aInput.
 */
static void
CopyChunkToBlock(const AudioChunk& aInput, AudioChunk *aBlock,
                 uint32_t aOffsetInBlock)
{
  uint32_t blockChannels = aBlock->ChannelCount();
  nsAutoTArray<const void*,2> channels;
  if (aInput.IsNull()) {
    channels.SetLength(blockChannels);
    PodZero(channels.Elements(), blockChannels);
  } else {
    channels.SetLength(aInput.ChannelCount());
    PodCopy(channels.Elements(), aInput.mChannelData.Elements(), channels.Length());
    if (channels.Length() != blockChannels) {
      // We only need to upmix here because aBlock's channel count has been
      // chosen to be a superset of the channel count of every chunk.
      AudioChannelsUpMix(&channels, blockChannels, nullptr);
    }
  }

  uint32_t duration = aInput.GetDuration();
  for (uint32_t c = 0; c < blockChannels; ++c) {
    float* outputData =
      static_cast<float*>(const_cast<void*>(aBlock->mChannelData[c])) + aOffsetInBlock;
    if (channels[c]) {
      switch (aInput.mBufferFormat) {
      case AUDIO_FORMAT_FLOAT32:
        ConvertAudioSamplesWithScale(
            static_cast<const float*>(channels[c]), outputData, duration,
            aInput.mVolume);
        break;
      case AUDIO_FORMAT_S16:
        ConvertAudioSamplesWithScale(
            static_cast<const int16_t*>(channels[c]), outputData, duration,
            aInput.mVolume);
        break;
      default:
        NS_ERROR("Unhandled format");
      }
    } else {
      PodZero(outputData, duration);
    }
  }
}
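
The AUDIO_FORMAT_S16 branch converts 16-bit samples to float while applying
the chunk volume. A plausible sketch of such an overload, assuming the usual
1/32768 normalization (the real ConvertAudioSamplesWithScale may differ in
detail):

#include <stddef.h>
#include <stdint.h>

static void
ConvertS16ToFloatWithScale(const int16_t* aIn, float* aOut,
                           size_t aCount, float aScale)
{
  for (size_t i = 0; i < aCount; ++i) {
    // Map [-32768, 32767] into roughly [-1, 1], then apply the volume.
    aOut[i] = (aIn[i] / 32768.0f) * aScale;
  }
}
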
Example #19
DWORD WINAPI
QueryDosDeviceWHook(LPCWSTR lpDeviceName, LPWSTR lpTargetPath, DWORD ucchMax)
{
  if (!sDeviceNames) {
    return 0;
  }
  std::wstring name = std::wstring(lpDeviceName);
  auto iter = sDeviceNames->find(name);
  if (iter == sDeviceNames->end()) {
    return 0;
  }
  const std::wstring& device = iter->second;
  if (device.size() + 1 > ucchMax) {
    return 0;
  }
  PodCopy(lpTargetPath, device.c_str(), device.size());
  lpTargetPath[device.size()] = 0;
  GMP_LOG("QueryDosDeviceWHook %S -> %S", lpDeviceName, lpTargetPath);
  return name.size();
}
Example #20
JSFlatString*
NewStringCopyNDontDeflate(ExclusiveContext* cx, const CharT* s, size_t n)
{
    if (JSInlineString::lengthFits<CharT>(n))
        return NewInlineString<allowGC>(cx, mozilla::Range<const CharT>(s, n));

    ScopedJSFreePtr<CharT> news(cx->pod_malloc<CharT>(n + 1));
    if (!news)
        return nullptr;

    PodCopy(news.get(), s, n);
    news[n] = 0;

    JSFlatString* str = JSFlatString::new_<allowGC>(cx, news.get(), n);
    if (!str)
        return nullptr;

    news.forget();
    return str;
}
Example #21
void
CDMProxy::DecryptJob::PostResult(GMPErr aResult, const nsTArray<uint8_t>& aDecryptedData)
{
  if (aDecryptedData.Length() != mSample->Size()) {
    NS_WARNING("CDM returned incorrect number of decrypted bytes");
  }
  if (GMP_SUCCEEDED(aResult)) {
    nsAutoPtr<MediaRawDataWriter> writer(mSample->CreateWriter());
    PodCopy(writer->Data(),
            aDecryptedData.Elements(),
            std::min<size_t>(aDecryptedData.Length(), mSample->Size()));
  } else if (aResult == GMPNoKeyErr) {
    NS_WARNING("CDM returned GMPNoKeyErr");
    // We still have the encrypted sample, so we can re-enqueue it to be
    // decrypted again once the key is usable again.
  } else {
    nsAutoCString str("CDM returned decode failure GMPErr=");
    str.AppendInt(aResult);
    NS_WARNING(str.get());
  }
  mPromise.Resolve(DecryptResult(aResult, mSample), __func__);
}
Example #22
void
CopyChars(Latin1Char* dest, const JSLinearString& str)
{
    AutoCheckCannotGC nogc;
    if (str.hasLatin1Chars()) {
        PodCopy(dest, str.latin1Chars(nogc), str.length());
    } else {
        /*
         * When we flatten a TwoByte rope, we turn child ropes (including Latin1
         * ropes) into TwoByte dependent strings. If one of these strings is
         * also part of another Latin1 rope tree, we can have a Latin1 rope with
         * a TwoByte descendent and we end up here when we flatten it. Although
         * the chars are stored as TwoByte, we know they must be in the Latin1
         * range, so we can safely deflate here.
         */
        size_t len = str.length();
        const char16_t* chars = str.twoByteChars(nogc);
        for (size_t i = 0; i < len; i++) {
            MOZ_ASSERT(chars[i] <= JSString::MAX_LATIN1_CHAR);
            dest[i] = chars[i];
        }
    }
}
Example #23
  nsresult Output(BufferInfo::Param aInfo, void* aBuffer,
                  MediaFormat::Param aFormat,
                  const media::TimeUnit& aDuration) {
    // The output on Android is always 16-bit signed

    nsresult rv;
    int32_t numChannels;
    NS_ENSURE_SUCCESS(rv =
        aFormat->GetInteger(NS_LITERAL_STRING("channel-count"), &numChannels), rv);

    int32_t sampleRate;
    NS_ENSURE_SUCCESS(rv =
        aFormat->GetInteger(NS_LITERAL_STRING("sample-rate"), &sampleRate), rv);

    int32_t size;
    NS_ENSURE_SUCCESS(rv = aInfo->Size(&size), rv);

    // size is in bytes; with 16-bit samples there are size / 2 samples.
    const int32_t numSamples = size / 2;
    const int32_t numFrames = numSamples / numChannels;
    AudioDataValue* audio = new AudioDataValue[numSamples];
    PodCopy(audio, static_cast<AudioDataValue*>(aBuffer), numSamples);

    int32_t offset;
    NS_ENSURE_SUCCESS(rv = aInfo->Offset(&offset), rv);

    int64_t presentationTimeUs;
    NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);

    nsRefPtr<AudioData> data = new AudioData(offset, presentationTimeUs,
                                             aDuration.ToMicroseconds(),
                                             numFrames,
                                             audio,
                                             numChannels,
                                             sampleRate);
    ENVOKE_CALLBACK(Output, data);
    return NS_OK;
  }
Example #24
void
CDMProxy::gmp_Decrypted(uint32_t aId,
                        GMPErr aResult,
                        const nsTArray<uint8_t>& aDecryptedData)
{
  MOZ_ASSERT(IsOnGMPThread());
  for (size_t i = 0; i < mDecryptionJobs.Length(); i++) {
    DecryptJob* job = mDecryptionJobs[i];
    if (job->mId == aId) {
      if (aDecryptedData.Length() != job->mSample->size) {
        NS_WARNING("CDM returned incorrect number of decrypted bytes");
      }
      PodCopy(job->mSample->data,
              aDecryptedData.Elements(),
              std::min<size_t>(aDecryptedData.Length(), job->mSample->size));
      nsresult rv = GMP_SUCCEEDED(aResult) ? NS_OK : NS_ERROR_FAILURE;
      job->mClient->Decrypted(rv, job->mSample.forget());
      mDecryptionJobs.RemoveElementAt(i);
      return;
    }
  }
  NS_WARNING("GMPDecryptorChild returned incorrect job ID");
}
Example #25
void
AudioBuffer::SetRawChannelContents(JSContext* aJSContext, uint32_t aChannel,
                                   float* aContents)
{
  PodCopy(JS_GetFloat32ArrayData(mJSChannels[aChannel]), aContents, mLength);
}
Example #26
void MediaCodecDataDecoder::DecoderLoop()
{
  bool outputDone = false;

  bool draining = false;
  bool waitingEOF = false;

  AutoLocalJNIFrame frame(GetJNIForThread(), 1);
  nsRefPtr<MediaRawData> sample;

  MediaFormat::LocalRef outputFormat(frame.GetEnv());
  nsresult res;

  for (;;) {
    {
      MonitorAutoLock lock(mMonitor);
      while (!mStopping && !mDraining && !mFlushing && mQueue.empty()) {
        if (mQueue.empty()) {
          // We could be waiting here forever if we don't signal that we need more input
          ENVOKE_CALLBACK(InputExhausted);
        }
        lock.Wait();
      }

      if (mStopping) {
        // Get out of the loop. This is the only exit point.
        break;
      }

      if (mFlushing) {
        mDecoder->Flush();
        ClearQueue();
        mFlushing = false;
        lock.Notify();
        continue;
      }

      if (mDraining && !sample && !waitingEOF) {
        draining = true;
      }

      // We're not stopping or draining, so try to get a sample
      if (!mQueue.empty()) {
        sample = mQueue.front();
      }
    }

    if (draining && !waitingEOF) {
      MOZ_ASSERT(!sample, "Shouldn't have a sample when pushing EOF frame");

      int32_t inputIndex;
      res = mDecoder->DequeueInputBuffer(DECODER_TIMEOUT, &inputIndex);
      HANDLE_DECODER_ERROR();

      if (inputIndex >= 0) {
        res = mDecoder->QueueInputBuffer(inputIndex, 0, 0, 0, MediaCodec::BUFFER_FLAG_END_OF_STREAM);
        HANDLE_DECODER_ERROR();

        waitingEOF = true;
      }
    }

    if (sample) {
      // We have a sample, try to feed it to the decoder
      int inputIndex;
      res = mDecoder->DequeueInputBuffer(DECODER_TIMEOUT, &inputIndex);
      HANDLE_DECODER_ERROR();

      if (inputIndex >= 0) {
        jni::Object::LocalRef buffer(frame.GetEnv());
        res = GetInputBuffer(frame.GetEnv(), inputIndex, &buffer);
        HANDLE_DECODER_ERROR();

        void* directBuffer = frame.GetEnv()->GetDirectBufferAddress(buffer.Get());

        MOZ_ASSERT(frame.GetEnv()->GetDirectBufferCapacity(buffer.Get()) >= sample->Size(),
          "Decoder buffer is not large enough for sample");

        {
          // We're feeding this to the decoder, so remove it from the queue
          MonitorAutoLock lock(mMonitor);
          mQueue.pop();
        }

        PodCopy((uint8_t*)directBuffer, sample->Data(), sample->Size());

        res = mDecoder->QueueInputBuffer(inputIndex, 0, sample->Size(),
                                         sample->mTime, 0);
        HANDLE_DECODER_ERROR();

        mDurations.push(media::TimeUnit::FromMicroseconds(sample->mDuration));
        sample = nullptr;
        outputDone = false;
      }
    }

    if (!outputDone) {
      BufferInfo::LocalRef bufferInfo;
      res = BufferInfo::New(&bufferInfo);
      HANDLE_DECODER_ERROR();

      int32_t outputStatus;
      res = mDecoder->DequeueOutputBuffer(bufferInfo, DECODER_TIMEOUT, &outputStatus);
      HANDLE_DECODER_ERROR();

      if (outputStatus == MediaCodec::INFO_TRY_AGAIN_LATER) {
        // We might want to call mCallback->InputExhausted() here, but there
        // seem to be some possible bad interactions with the threading.
      } else if (outputStatus == MediaCodec::INFO_OUTPUT_BUFFERS_CHANGED) {
        res = ResetOutputBuffers();
        HANDLE_DECODER_ERROR();
      } else if (outputStatus == MediaCodec::INFO_OUTPUT_FORMAT_CHANGED) {
        res = mDecoder->GetOutputFormat(ReturnTo(&outputFormat));
        HANDLE_DECODER_ERROR();
      } else if (outputStatus < 0) {
        NS_WARNING("unknown error from decoder!");
        ENVOKE_CALLBACK(Error);

        // Don't break here just in case it's recoverable. If it's not, other
        // stuff will fail later and we'll bail out.
      } else {
        int32_t flags;
        res = bufferInfo->Flags(&flags);
        HANDLE_DECODER_ERROR();

        // We have a valid buffer index >= 0 here
        if (flags & MediaCodec::BUFFER_FLAG_END_OF_STREAM) {
          if (draining) {
            draining = false;
            waitingEOF = false;

            mMonitor.Lock();
            mDraining = false;
            mMonitor.Notify();
            mMonitor.Unlock();

            ENVOKE_CALLBACK(DrainComplete);
          }

          mDecoder->ReleaseOutputBuffer(outputStatus, false);
          outputDone = true;

          // We only queue empty EOF frames, so we're done for now
          continue;
        }

        MOZ_ASSERT(!mDurations.empty(), "Should have had a duration queued");

        media::TimeUnit duration;
        if (!mDurations.empty()) {
          duration = mDurations.front();
          mDurations.pop();
        }

        auto buffer = jni::Object::LocalRef::Adopt(
            frame.GetEnv()->GetObjectArrayElement(mOutputBuffers.Get(), outputStatus));
        if (buffer) {
          // The buffer will be null on Android L if we are decoding to a Surface
          void* directBuffer = frame.GetEnv()->GetDirectBufferAddress(buffer.Get());
          Output(bufferInfo, directBuffer, outputFormat, duration);
        }

        // The Surface will be updated at this point (for video)
        mDecoder->ReleaseOutputBuffer(outputStatus, true);

        PostOutput(bufferInfo, outputFormat, duration);
      }
    }
  }

  Cleanup();

  // We're done
  MonitorAutoLock lock(mMonitor);
  mStopping = false;
  mMonitor.Notify();
}
Example #27
void Reverb::process(const AudioBlock* sourceBus, AudioBlock* destinationBus)
{
    // Do a fairly comprehensive sanity check.
    // If these conditions are satisfied, all of the source and destination pointers will be valid for the various matrixing cases.
    bool isSafeToProcess = sourceBus && destinationBus && sourceBus->ChannelCount() > 0 && destinationBus->mChannelData.Length() > 0
        && WEBAUDIO_BLOCK_SIZE <= MaxFrameSize && WEBAUDIO_BLOCK_SIZE <= size_t(sourceBus->GetDuration()) && WEBAUDIO_BLOCK_SIZE <= size_t(destinationBus->GetDuration());

    MOZ_ASSERT(isSafeToProcess);
    if (!isSafeToProcess)
        return;

    // For now only handle mono or stereo output
    MOZ_ASSERT(destinationBus->ChannelCount() <= 2);

    float* destinationChannelL = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[0]));
    const float* sourceBusL = static_cast<const float*>(sourceBus->mChannelData[0]);

    // Handle input -> output matrixing...
    size_t numInputChannels = sourceBus->ChannelCount();
    size_t numOutputChannels = destinationBus->ChannelCount();
    size_t numReverbChannels = m_convolvers.Length();

    if (numInputChannels == 2 && numReverbChannels == 2 && numOutputChannels == 2) {
        // 2 -> 2 -> 2
        const float* sourceBusR = static_cast<const float*>(sourceBus->mChannelData[1]);
        float* destinationChannelR = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[1]));
        m_convolvers[0]->process(sourceBusL, destinationChannelL);
        m_convolvers[1]->process(sourceBusR, destinationChannelR);
    } else if (numInputChannels == 1 && numOutputChannels == 2 && numReverbChannels == 2) {
        // 1 -> 2 -> 2
        for (int i = 0; i < 2; ++i) {
            float* destinationChannel = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[i]));
            m_convolvers[i]->process(sourceBusL, destinationChannel);
        }
    } else if (numInputChannels == 1 && numReverbChannels == 1 && numOutputChannels == 2) {
        // 1 -> 1 -> 2
        m_convolvers[0]->process(sourceBusL, destinationChannelL);

        // simply copy L -> R
        float* destinationChannelR = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[1]));
        bool isCopySafe = destinationChannelL && destinationChannelR && size_t(destinationBus->GetDuration()) >= WEBAUDIO_BLOCK_SIZE;
        MOZ_ASSERT(isCopySafe);
        if (!isCopySafe)
            return;
        PodCopy(destinationChannelR, destinationChannelL, WEBAUDIO_BLOCK_SIZE);
    } else if (numInputChannels == 1 && numReverbChannels == 1 && numOutputChannels == 1) {
        // 1 -> 1 -> 1
        m_convolvers[0]->process(sourceBusL, destinationChannelL);
    } else if (numInputChannels == 2 && numReverbChannels == 4 && numOutputChannels == 2) {
        // 2 -> 4 -> 2 ("True" stereo)
        const float* sourceBusR = static_cast<const float*>(sourceBus->mChannelData[1]);
        float* destinationChannelR = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[1]));

        float* tempChannelL = static_cast<float*>(const_cast<void*>(m_tempBuffer.mChannelData[0]));
        float* tempChannelR = static_cast<float*>(const_cast<void*>(m_tempBuffer.mChannelData[1]));

        // Process left virtual source
        m_convolvers[0]->process(sourceBusL, destinationChannelL);
        m_convolvers[1]->process(sourceBusL, destinationChannelR);

        // Process right virtual source
        m_convolvers[2]->process(sourceBusR, tempChannelL);
        m_convolvers[3]->process(sourceBusR, tempChannelR);

        AudioBufferAddWithScale(tempChannelL, 1.0f, destinationChannelL, sourceBus->GetDuration());
        AudioBufferAddWithScale(tempChannelR, 1.0f, destinationChannelR, sourceBus->GetDuration());
    } else if (numInputChannels == 1 && numReverbChannels == 4 && numOutputChannels == 2) {
        // 1 -> 4 -> 2 (Processing mono with "True" stereo impulse response)
        // This is an inefficient use of a four-channel impulse response, but we should handle the case.
        float* destinationChannelR = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[1]));

        float* tempChannelL = static_cast<float*>(const_cast<void*>(m_tempBuffer.mChannelData[0]));
        float* tempChannelR = static_cast<float*>(const_cast<void*>(m_tempBuffer.mChannelData[1]));

        // Process left virtual source
        m_convolvers[0]->process(sourceBusL, destinationChannelL);
        m_convolvers[1]->process(sourceBusL, destinationChannelR);

        // Process right virtual source
        m_convolvers[2]->process(sourceBusL, tempChannelL);
        m_convolvers[3]->process(sourceBusL, tempChannelR);

        AudioBufferAddWithScale(tempChannelL, 1.0f, destinationChannelL, sourceBus->GetDuration());
        AudioBufferAddWithScale(tempChannelR, 1.0f, destinationChannelR, sourceBus->GetDuration());
    } else {
        // Handle gracefully any unexpected / unsupported matrixing
        // FIXME: add code for 5.1 support...
        destinationBus->SetNull(destinationBus->GetDuration());
    }
}
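
For the 2 -> 4 -> 2 ("true" stereo) case, the four convolvers form a 2x2
matrix of impulse responses. Reading the routing off the code above:

// outL = m_convolvers[0](inL) + m_convolvers[2](inR)
// outR = m_convolvers[1](inL) + m_convolvers[3](inR)
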
Example #28
nsresult
AppleATDecoder::DecodeSample(mp4_demuxer::MP4Sample* aSample)
{
  // Array containing the queued decoded audio frames, about to be output.
  nsTArray<AudioDataValue> outputData;
  UInt32 channels = mOutputFormat.mChannelsPerFrame;
  // Pick a multiple of the frame size close to a power of two
  // for efficient allocation.
  const uint32_t MAX_AUDIO_FRAMES = 128;
  const uint32_t maxDecodedSamples = MAX_AUDIO_FRAMES * channels;

  // Descriptions for _decompressed_ audio packets. Ignored.
  nsAutoArrayPtr<AudioStreamPacketDescription>
    packets(new AudioStreamPacketDescription[MAX_AUDIO_FRAMES]);

  // This API insists on having packets spoon-fed to it from a callback.
  // This structure exists only to pass our state.
  PassthroughUserData userData =
    { channels, (UInt32)aSample->size, aSample->data };

  // Decompressed audio buffer
  nsAutoArrayPtr<AudioDataValue> decoded(new AudioDataValue[maxDecodedSamples]);

  do {
    AudioBufferList decBuffer;
    decBuffer.mNumberBuffers = 1;
    decBuffer.mBuffers[0].mNumberChannels = channels;
    decBuffer.mBuffers[0].mDataByteSize =
      maxDecodedSamples * sizeof(AudioDataValue);
    decBuffer.mBuffers[0].mData = decoded.get();

    // in: the max number of packets we can handle from the decoder.
    // out: the number of packets the decoder is actually returning.
    UInt32 numFrames = MAX_AUDIO_FRAMES;

    OSStatus rv = AudioConverterFillComplexBuffer(mConverter,
                                                  _PassthroughInputDataCallback,
                                                  &userData,
                                                  &numFrames /* in/out */,
                                                  &decBuffer,
                                                  packets.get());

    if (rv && rv != kNoMoreDataErr) {
      LOG("Error decoding audio stream: %d\n", rv);
      return NS_ERROR_FAILURE;
    }

    if (numFrames) {
      outputData.AppendElements(decoded.get(), numFrames * channels);
    }

    if (rv == kNoMoreDataErr) {
      break;
    }
  } while (true);

  if (outputData.IsEmpty()) {
    return NS_OK;
  }

  size_t numFrames = outputData.Length() / channels;
  int rate = mOutputFormat.mSampleRate;
  CheckedInt<Microseconds> duration = FramesToUsecs(numFrames, rate);
  if (!duration.isValid()) {
    NS_WARNING("Invalid count of accumulated audio samples");
    return NS_ERROR_FAILURE;
  }

#ifdef LOG_SAMPLE_DECODE
  LOG("pushed audio at time %lfs; duration %lfs\n",
      (double)aSample->composition_timestamp / USECS_PER_S,
      (double)duration.value() / USECS_PER_S);
#endif

  nsAutoArrayPtr<AudioDataValue> data(new AudioDataValue[outputData.Length()]);
  PodCopy(data.get(), &outputData[0], outputData.Length());
  nsRefPtr<AudioData> audio = new AudioData(aSample->byte_offset,
                                            aSample->composition_timestamp,
                                            duration.value(),
                                            numFrames,
                                            data.forget(),
                                            channels,
                                            rate);
  mCallback->Output(audio);
  return NS_OK;
}
Example #29
void
CopyChars(Latin1Char *dest, const JSLinearString &str)
{
    AutoCheckCannotGC nogc;
    PodCopy(dest, str.latin1Chars(nogc), str.length());
}
Example #30
nsresult
OpusTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
  PROFILER_LABEL("OpusTrackEncoder", "GetEncodedTrack",
    js::ProfileEntry::Category::OTHER);
  {
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);
    // Wait until initialized or cancelled.
    while (!mCanceled && !mInitialized) {
      mReentrantMonitor.Wait();
    }
    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }
  }

  // The calculation below depends on mInitialized being true.
  MOZ_ASSERT(mInitialized);

  // Resampled frames left over from last time that didn't fit into an Opus
  // packet duration.
  const int framesLeft = mResampledLeftover.Length() / mChannels;
  // When framesLeft is 0, (GetPacketDuration() - framesLeft) is a multiple
  // of kOpusSamplingRate, so there is no precision loss in the integer
  // division computing framesToFetch. If framesLeft > 0, we need to add 1 to
  // framesToFetch to ensure there will be at least n frames after re-sampling.
  const int frameRoundUp = framesLeft ? 1 : 0;

  MOZ_ASSERT(GetPacketDuration() >= framesLeft);
  // Try to fetch m frames such that there will be n frames
  // where (n + framesLeft) >= GetPacketDuration() after re-sampling.
  const int framesToFetch = !mResampler ? GetPacketDuration()
    : (GetPacketDuration() - framesLeft) * mSamplingRate / kOpusSamplingRate
      + frameRoundUp;
  {
    // Move all the samples from mRawSegment to mSourceSegment. We only hold
    // the monitor in this block.
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);

    // Wait until we have enough raw data, reach end of stream, or are cancelled.
    while (!mCanceled && mRawSegment.GetDuration() +
        mSourceSegment.GetDuration() < framesToFetch &&
        !mEndOfStream) {
      mReentrantMonitor.Wait();
    }

    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }

    mSourceSegment.AppendFrom(&mRawSegment);

    // Pad |mLookahead| samples to the end of the source stream to prevent
    // loss of original data; the pcm duration will be calculated at the 48K
    // rate later.
    if (mEndOfStream && !mEosSetInEncoder) {
      mEosSetInEncoder = true;
      mSourceSegment.AppendNullData(mLookahead);
    }
  }

  // Start encoding data.
  nsAutoTArray<AudioDataValue, 9600> pcm;
  pcm.SetLength(GetPacketDuration() * mChannels);
  AudioSegment::ChunkIterator iter(mSourceSegment);
  int frameCopied = 0;

  while (!iter.IsEnded() && frameCopied < framesToFetch) {
    AudioChunk chunk = *iter;

    // Clip the chunk to the required frame size.
    int frameToCopy = chunk.GetDuration();
    if (frameCopied + frameToCopy > framesToFetch) {
      frameToCopy = framesToFetch - frameCopied;
    }

    if (!chunk.IsNull()) {
      // Append the interleaved data to the end of pcm buffer.
      AudioTrackEncoder::InterleaveTrackData(chunk, frameToCopy, mChannels,
        pcm.Elements() + frameCopied * mChannels);
    } else {
      memset(pcm.Elements() + frameCopied * mChannels, 0,
             frameToCopy * mChannels * sizeof(AudioDataValue));
    }

    frameCopied += frameToCopy;
    iter.Next();
  }

  RefPtr<EncodedFrame> audiodata = new EncodedFrame();
  audiodata->SetFrameType(EncodedFrame::OPUS_AUDIO_FRAME);
  int framesInPCM = frameCopied;
  if (mResampler) {
    nsAutoTArray<AudioDataValue, 9600> resamplingDest;
    // We want to consume all the input data, so we slightly oversize the
    // resampled data buffer so we can fit the output data in. We cannot really
    // predict the output frame count at each call.
    uint32_t outframes = frameCopied * kOpusSamplingRate / mSamplingRate + 1;
    uint32_t inframes = frameCopied;

    resamplingDest.SetLength(outframes * mChannels);

#ifdef MOZ_SAMPLE_TYPE_S16
    short* in = reinterpret_cast<short*>(pcm.Elements());
    short* out = reinterpret_cast<short*>(resamplingDest.Elements());
    speex_resampler_process_interleaved_int(mResampler, in, &inframes,
                                                        out, &outframes);
#else
    float* in = reinterpret_cast<float*>(pcm.Elements());
    float* out = reinterpret_cast<float*>(resamplingDest.Elements());
    speex_resampler_process_interleaved_float(mResampler, in, &inframes,
                                                          out, &outframes);
#endif

    MOZ_ASSERT(pcm.Length() >= mResampledLeftover.Length());
    PodCopy(pcm.Elements(), mResampledLeftover.Elements(),
        mResampledLeftover.Length());

    uint32_t outframesToCopy = std::min(outframes,
        static_cast<uint32_t>(GetPacketDuration() - framesLeft));

    MOZ_ASSERT(pcm.Length() - mResampledLeftover.Length() >=
        outframesToCopy * mChannels);
    PodCopy(pcm.Elements() + mResampledLeftover.Length(),
        resamplingDest.Elements(), outframesToCopy * mChannels);
    int frameLeftover = outframes - outframesToCopy;
    mResampledLeftover.SetLength(frameLeftover * mChannels);
    PodCopy(mResampledLeftover.Elements(),
        resamplingDest.Elements() + outframesToCopy * mChannels,
        mResampledLeftover.Length());
    // This is always at 48000Hz.
    framesInPCM = framesLeft + outframesToCopy;
    audiodata->SetDuration(framesInPCM);
  } else {
    // The ogg timestamping and pre-skip are always timed at 48000.
    audiodata->SetDuration(frameCopied * (kOpusSamplingRate / mSamplingRate));
  }

  // Remove the raw data which has been pulled into the pcm buffer.
  // The value of frameCopied should be equal to (or smaller than, if eos)
  // GetPacketDuration().
  mSourceSegment.RemoveLeading(frameCopied);

  // We have reached the end of the input stream and all queued data has been
  // pulled for encoding.
  if (mSourceSegment.GetDuration() == 0 && mEndOfStream) {
    mEncodingComplete = true;
    LOG("[Opus] Done encoding.");
  }

  MOZ_ASSERT(mEndOfStream || framesInPCM == GetPacketDuration());

  // Append null data to the pcm buffer if the leftover data is not enough
  // for the Opus encoder.
  if (framesInPCM < GetPacketDuration() && mEndOfStream) {
    PodZero(pcm.Elements() + framesInPCM * mChannels,
        (GetPacketDuration() - framesInPCM) * mChannels);
  }
  nsTArray<uint8_t> frameData;
  // Encode the data with Opus Encoder.
  frameData.SetLength(MAX_DATA_BYTES);
  // result is returned as opus error code if it is negative.
  int result = 0;
#ifdef MOZ_SAMPLE_TYPE_S16
  const opus_int16* pcmBuf = static_cast<opus_int16*>(pcm.Elements());
  result = opus_encode(mEncoder, pcmBuf, GetPacketDuration(),
                       frameData.Elements(), MAX_DATA_BYTES);
#else
  const float* pcmBuf = static_cast<float*>(pcm.Elements());
  result = opus_encode_float(mEncoder, pcmBuf, GetPacketDuration(),
                             frameData.Elements(), MAX_DATA_BYTES);
#endif
  frameData.SetLength(result >= 0 ? result : 0);

  if (result < 0) {
    LOG("[Opus] Fail to encode data! Result: %s.", opus_strerror(result));
  }
  if (mEncodingComplete) {
    if (mResampler) {
      speex_resampler_destroy(mResampler);
      mResampler = nullptr;
    }
    mResampledLeftover.SetLength(0);
  }

  audiodata->SwapInFrameData(frameData);
  aData.AppendEncodedFrame(audiodata);
  return result >= 0 ? NS_OK : NS_ERROR_FAILURE;
}
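
To make the resampler bookkeeping concrete, a worked example with assumed
numbers: suppose GetPacketDuration() == 960 and framesLeft == 100, so the
packet needs 860 fresh frames. If the resampler produces outframes == 900,
then outframesToCopy = min(900, 860) = 860, framesInPCM = 100 + 860 = 960
(exactly one packet), and the remaining 900 - 860 = 40 frames are saved in
mResampledLeftover for the next call.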