Code example #1
/**
 * Copies the data in aInput to aOffsetInBlock within aBlock.
 * aBlock must have been allocated with AllocateInputBlock and have a channel
 * count that's a superset of the channels in aInput.
 */
template <typename T>
static void
CopyChunkToBlock(AudioChunk& aInput, AudioBlock* aBlock,
                 uint32_t aOffsetInBlock)
{
  uint32_t blockChannels = aBlock->ChannelCount();
  AutoTArray<const T*,2> channels;
  if (aInput.IsNull()) {
    channels.SetLength(blockChannels);
    PodZero(channels.Elements(), blockChannels);
  } else {
    const nsTArray<const T*>& inputChannels = aInput.ChannelData<T>();
    channels.SetLength(inputChannels.Length());
    PodCopy(channels.Elements(), inputChannels.Elements(), channels.Length());
    if (channels.Length() != blockChannels) {
      // We only need to upmix here because aBlock's channel count has been
      // chosen to be a superset of the channel count of every chunk.
      AudioChannelsUpMix(&channels, blockChannels, static_cast<T*>(nullptr));
    }
  }

  for (uint32_t c = 0; c < blockChannels; ++c) {
    float* outputData = aBlock->ChannelFloatsForWrite(c) + aOffsetInBlock;
    if (channels[c]) {
      ConvertAudioSamplesWithScale(channels[c], outputData, aInput.GetDuration(), aInput.mVolume);
    } else {
      PodZero(outputData, aInput.GetDuration());
    }
  }
}
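Every snippet on this page leans on the same mfbt helpers. For orientation, here is a behaviorally equivalent sketch of the PodZero/PodCopy overloads involved; the real definitions live in mozilla's mfbt/PodOperations.h, apply only to trivially copyable types, and the array overload of PodZero there loops element-wise rather than calling memset once.

#include <stddef.h>  // size_t
#include <string.h>  // memset, memcpy

// Sketch only; see mfbt/PodOperations.h for the real definitions.
template <typename T>
static void PodZero(T* aT) {
  memset(aT, 0, sizeof(T));  // zero a single struct, e.g. PodZero(&mVPX)
}

template <typename T>
static void PodZero(T* aT, size_t aNElems) {
  memset(aT, 0, aNElems * sizeof(T));  // zero an array of aNElems elements
}

template <typename T>
static void PodCopy(T* aDst, const T* aSrc, size_t aNElems) {
  memcpy(aDst, aSrc, aNElems * sizeof(T));  // raw copy, no constructors run
}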
Code example #2
VPXDecoder::VPXDecoder(const CreateDecoderParams& aParams)
  : mImageContainer(aParams.mImageContainer)
  , mTaskQueue(aParams.mTaskQueue)
  , mInfo(aParams.VideoConfig())
  , mCodec(MimeTypeToCodec(aParams.VideoConfig().mMimeType))
{
  MOZ_COUNT_CTOR(VPXDecoder);
  PodZero(&mVPX);
  PodZero(&mVPXAlpha);
}
Code example #3
File: FFTConvolver.cpp Project: AOSC-Dev/Pale-Moon
FFTConvolver::FFTConvolver(size_t fftSize)
    : m_frame(fftSize)
    , m_readWriteIndex(0)
{
  m_inputBuffer.SetLength(fftSize);
  PodZero(m_inputBuffer.Elements(), fftSize);
  m_outputBuffer.SetLength(fftSize);
  PodZero(m_outputBuffer.Elements(), fftSize);
  m_lastOverlapBuffer.SetLength(fftSize / 2);
  PodZero(m_lastOverlapBuffer.Elements(), fftSize / 2);
}
Code example #4
  void ProcessBlock(AudioNodeStream* aStream,
                    GraphTime aFrom,
                    const AudioBlock& aInput,
                    AudioBlock* aOutput,
                    bool* aFinished) override
  {
    // This node is not connected to anything. Per spec, we don't fire the
    // onaudioprocess event. We also want to clear out the input and output
    // buffer queue, and output a null buffer.
    if (!mIsConnected) {
      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
      mSharedBuffers->Reset();
      mInputWriteIndex = 0;
      return;
    }

    // The input buffer is allocated lazily when non-null input is received.
    if (!aInput.IsNull() && !mInputBuffer) {
      mInputBuffer = ThreadSharedFloatArrayBufferList::
        Create(mInputChannelCount, mBufferSize, fallible);
      if (mInputBuffer && mInputWriteIndex) {
        // Zero leading for null chunks that were skipped.
        for (uint32_t i = 0; i < mInputChannelCount; ++i) {
          float* channelData = mInputBuffer->GetDataForWrite(i);
          PodZero(channelData, mInputWriteIndex);
        }
      }
    }

    // First, record our input buffer, if its allocation succeeded.
    uint32_t inputChannelCount = mInputBuffer ? mInputBuffer->GetChannels() : 0;
    for (uint32_t i = 0; i < inputChannelCount; ++i) {
      float* writeData = mInputBuffer->GetDataForWrite(i) + mInputWriteIndex;
      if (aInput.IsNull()) {
        PodZero(writeData, aInput.GetDuration());
      } else {
        MOZ_ASSERT(aInput.GetDuration() == WEBAUDIO_BLOCK_SIZE, "sanity check");
        MOZ_ASSERT(aInput.ChannelCount() == inputChannelCount);
        AudioBlockCopyChannelWithScale(static_cast<const float*>(aInput.mChannelData[i]),
                                       aInput.mVolume, writeData);
      }
    }
    mInputWriteIndex += aInput.GetDuration();

    // Now, see if we have data to output
    // Note that we need to do this before sending the buffer to the main
    // thread so that our delay time is updated.
    *aOutput = mSharedBuffers->GetOutputBuffer();

    if (mInputWriteIndex >= mBufferSize) {
      SendBuffersToMainThread(aStream, aFrom);
      mInputWriteIndex -= mBufferSize;
    }
  }
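ProcessBlock above is an instance of a common accumulate-and-flush pattern: copy one fixed-size block per callback into a larger buffer and hand the buffer off once it fills. A generic sketch under assumed conditions (hypothetical names, not the gecko API; assumes the buffer size is an exact multiple of the block size, which holds for ScriptProcessorNode since buffer sizes are powers of two >= 256 and WEBAUDIO_BLOCK_SIZE is 128):

#include <cstddef>
#include <cstring>
#include <vector>

struct BlockAccumulator {
  std::vector<float> buf;
  size_t writeIndex = 0;

  explicit BlockAccumulator(size_t bufferSize) : buf(bufferSize, 0.0f) {}

  template <typename FlushFn>
  void Append(const float* block, size_t blockSize, FlushFn flush) {
    std::memcpy(buf.data() + writeIndex, block, blockSize * sizeof(float));
    writeIndex += blockSize;
    if (writeIndex >= buf.size()) {  // a full buffer is ready
      flush(buf.data(), buf.size());
      writeIndex = 0;                // exact multiple, so no remainder
    }
  }
};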
Code example #5
File: VPXDecoder.cpp Project: jld/gecko-dev
VPXDecoder::VPXDecoder(const CreateDecoderParams& aParams)
    : mImageContainer(aParams.mImageContainer),
      mImageAllocator(aParams.mKnowsCompositor),
      mTaskQueue(aParams.mTaskQueue),
      mInfo(aParams.VideoConfig()),
      mCodec(MimeTypeToCodec(aParams.VideoConfig().mMimeType)),
      mLowLatency(
          aParams.mOptions.contains(CreateDecoderParams::Option::LowLatency)) {
  MOZ_COUNT_CTOR(VPXDecoder);
  PodZero(&mVPX);
  PodZero(&mVPXAlpha);
}
Code example #6
File: FFTConvolver.cpp Project: LaiPhil/gecko-dev
FFTConvolver::FFTConvolver(size_t fftSize, size_t renderPhase)
    : m_frame(fftSize)
    , m_readWriteIndex(renderPhase % (fftSize / 2))
{
  MOZ_ASSERT(fftSize >= 2 * WEBAUDIO_BLOCK_SIZE);
  m_inputBuffer.SetLength(fftSize);
  PodZero(m_inputBuffer.Elements(), fftSize);
  m_outputBuffer.SetLength(fftSize);
  PodZero(m_outputBuffer.Elements(), fftSize);
  m_lastOverlapBuffer.SetLength(fftSize / 2);
  PodZero(m_lastOverlapBuffer.Elements(), fftSize / 2);
}
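The buffer sizes above follow from overlap-add block convolution: each FFT of size fftSize processes fftSize/2 new input samples, and the trailing half of every inverse FFT is carried into the next block, which is why m_lastOverlapBuffer holds fftSize / 2 samples. A minimal sketch of that step, assuming this scheme:

#include <stddef.h>

// ifftResult holds 2 * halfSize samples from one inverse FFT. Emit the
// first half plus the overlap saved last round; keep the second half as
// the overlap for the next round.
static void OverlapAdd(const float* ifftResult, float* lastOverlap,
                       float* out, size_t halfSize) {
  for (size_t i = 0; i < halfSize; ++i) {
    out[i] = ifftResult[i] + lastOverlap[i];
    lastOverlap[i] = ifftResult[halfSize + i];
  }
}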
Code example #7
nsresult
AppleATDecoder::SetupDecoder(mp4_demuxer::MP4Sample* aSample)
{
  if (mFormatID == kAudioFormatMPEG4AAC &&
      mConfig.extended_profile == 2) {
    // Check for implicit SBR signalling if stream is AAC-LC
    // This will provide us with an updated magic cookie for use with
    // GetInputAudioDescription.
    if (NS_SUCCEEDED(GetImplicitAACMagicCookie(aSample)) &&
        !mMagicCookie.Length()) {
      // nothing found yet, will try again later
      return NS_ERROR_NOT_INITIALIZED;
    }
    // An error occurred, fallback to using default stream description
  }

  LOG("Initializing Apple AudioToolbox decoder");

  AudioStreamBasicDescription inputFormat;
  PodZero(&inputFormat);
  nsresult rv =
    GetInputAudioDescription(inputFormat,
                             mMagicCookie.Length() ?
                                 mMagicCookie : *mConfig.extra_data);
  if (NS_FAILED(rv)) {
    return rv;
  }
  // Fill in the output format manually.
  PodZero(&mOutputFormat);
  mOutputFormat.mFormatID = kAudioFormatLinearPCM;
  mOutputFormat.mSampleRate = inputFormat.mSampleRate;
  mOutputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
#if defined(MOZ_SAMPLE_TYPE_FLOAT32)
  mOutputFormat.mBitsPerChannel = 32;
  mOutputFormat.mFormatFlags =
    kLinearPCMFormatFlagIsFloat |
    0;
#else
# error Unknown audio sample type
#endif
  // Set up the decoder so it gives us one sample per frame
  mOutputFormat.mFramesPerPacket = 1;
  mOutputFormat.mBytesPerPacket = mOutputFormat.mBytesPerFrame
        = mOutputFormat.mChannelsPerFrame * mOutputFormat.mBitsPerChannel / 8;

  OSStatus status = AudioConverterNew(&inputFormat, &mOutputFormat, &mConverter);
  if (status) {
    LOG("Error %d constructing AudioConverter", status);
    mConverter = nullptr;
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}
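The output-format arithmetic near the end is the standard interleaved linear-PCM relationship: bytes per frame = channels * bits-per-channel / 8, and with mFramesPerPacket = 1 a packet is exactly one frame. A worked check with assumed values for a stereo float32 stream:

#include <cassert>

int main() {
  unsigned channelsPerFrame = 2;  // hypothetical stereo stream
  unsigned bitsPerChannel = 32;   // MOZ_SAMPLE_TYPE_FLOAT32
  unsigned bytesPerFrame = channelsPerFrame * bitsPerChannel / 8;
  assert(bytesPerFrame == 8);     // 8 bytes per interleaved frame
  return 0;
}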
Code example #8
File: VorbisDecoder.cpp Project: luke-chang/gecko-1
VorbisDataDecoder::VorbisDataDecoder(const CreateDecoderParams& aParams)
  : mInfo(aParams.AudioConfig())
  , mTaskQueue(aParams.mTaskQueue)
  , mPacketCount(0)
  , mFrames(0)
{
  // Zero these member vars to avoid crashes in Vorbis clear functions when
  // destructor is called before |Init|.
  PodZero(&mVorbisBlock);
  PodZero(&mVorbisDsp);
  PodZero(&mVorbisInfo);
  PodZero(&mVorbisComment);
}
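The comment explains why the constructor zeroes these structs: libvorbis' *_clear functions tolerate zeroed memory, so teardown is safe even if Init() never ran. A sketch of the destructor this protects (assumed; the real VorbisDataDecoder destructor may differ in detail):

VorbisDataDecoder::~VorbisDataDecoder()
{
  vorbis_block_clear(&mVorbisBlock);
  vorbis_dsp_clear(&mVorbisDsp);
  vorbis_comment_clear(&mVorbisComment);
  vorbis_info_clear(&mVorbisInfo);
}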
Code example #9
RefPtr<MediaDataDecoder::InitPromise>
VorbisDataDecoder::Init()
{
  vorbis_info_init(&mVorbisInfo);
  vorbis_comment_init(&mVorbisComment);
  PodZero(&mVorbisDsp);
  PodZero(&mVorbisBlock);

  AutoTArray<unsigned char*,4> headers;
  AutoTArray<size_t,4> headerLens;
  if (!XiphExtradataToHeaders(headers, headerLens,
                              mInfo.mCodecSpecificConfig->Elements(),
                              mInfo.mCodecSpecificConfig->Length())) {
    return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
  }
  for (size_t i = 0; i < headers.Length(); i++) {
    if (NS_FAILED(DecodeHeader(headers[i], headerLens[i]))) {
      return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                                          __func__);
    }
  }

  MOZ_ASSERT(mPacketCount == 3);

  int r = vorbis_synthesis_init(&mVorbisDsp, &mVorbisInfo);
  if (r) {
    return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
  }

  r = vorbis_block_init(&mVorbisDsp, &mVorbisBlock);
  if (r) {
    return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
  }

  if (mInfo.mRate != (uint32_t)mVorbisDsp.vi->rate) {
    LOG(LogLevel::Warning,
        ("Invalid Vorbis header: container and codec rate do not match!"));
  }
  if (mInfo.mChannels != (uint32_t)mVorbisDsp.vi->channels) {
    LOG(LogLevel::Warning,
        ("Invalid Vorbis header: container and codec channels do not match!"));
  }

  AudioConfig::ChannelLayout layout(mVorbisDsp.vi->channels);
  if (!layout.IsValid()) {
    return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
  }

  return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__);
}
Code example #10
File: VorbisDecoder.cpp Project: rtrsparq/gecko-dev
nsresult
VorbisDataDecoder::Init()
{
  vorbis_info_init(&mVorbisInfo);
  vorbis_comment_init(&mVorbisComment);
  PodZero(&mVorbisDsp);
  PodZero(&mVorbisBlock);

  size_t available = mInfo.mCodecSpecificConfig->Length();
  uint8_t *p = mInfo.mCodecSpecificConfig->Elements();
  for (int i = 0; i < 3; i++) {
    if (available < 2) {
      return NS_ERROR_FAILURE;
    }
    available -= 2;
    size_t length = BigEndian::readUint16(p);
    p += 2;
    if (available < length) {
      return NS_ERROR_FAILURE;
    }
    available -= length;
    if (NS_FAILED(DecodeHeader((const unsigned char*)p, length))) {
      return NS_ERROR_FAILURE;
    }
    p += length;
  }

  MOZ_ASSERT(mPacketCount == 3);

  int r = vorbis_synthesis_init(&mVorbisDsp, &mVorbisInfo);
  if (r) {
    return NS_ERROR_FAILURE;
  }

  r = vorbis_block_init(&mVorbisDsp, &mVorbisBlock);
  if (r) {
    return NS_ERROR_FAILURE;
  }

  if (mInfo.mRate != (uint32_t)mVorbisDsp.vi->rate) {
    LOG(LogLevel::Warning,
        ("Invalid Vorbis header: container and codec rate do not match!"));
  }
  if (mInfo.mChannels != (uint32_t)mVorbisDsp.vi->channels) {
    LOG(LogLevel::Warning,
        ("Invalid Vorbis header: container and codec channels do not match!"));
  }

  return NS_OK;
}
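The parsing loop above assumes the codec-specific config blob packs the three Vorbis headers (identification, comment, setup) back to back, each prefixed with a 16-bit big-endian length. A standalone sketch of that framing with a hypothetical helper:

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

static bool
SplitLengthPrefixedHeaders(const uint8_t* p, size_t available,
                           std::vector<std::pair<const uint8_t*, size_t>>& aOut)
{
  for (int i = 0; i < 3; i++) {
    if (available < 2) {
      return false;
    }
    size_t length = (size_t(p[0]) << 8) | p[1];  // BigEndian::readUint16
    p += 2;
    available -= 2;
    if (available < length) {
      return false;
    }
    aOut.emplace_back(p, length);  // header i: ident, comment, setup
    p += length;
    available -= length;
  }
  return true;
}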
Code example #11
ReverbConvolverStage::ReverbConvolverStage(const float* impulseResponse, size_t, size_t reverbTotalLatency, size_t stageOffset, size_t stageLength,
                                           size_t fftSize, size_t renderPhase, size_t renderSliceSize, ReverbAccumulationBuffer* accumulationBuffer, bool directMode)
    : m_accumulationBuffer(accumulationBuffer)
    , m_accumulationReadIndex(0)
    , m_inputReadIndex(0)
    , m_directMode(directMode)
{
    MOZ_ASSERT(impulseResponse);
    MOZ_ASSERT(accumulationBuffer);

    if (!m_directMode) {
        m_fftKernel = new FFTBlock(fftSize);
        m_fftKernel->PadAndMakeScaledDFT(impulseResponse + stageOffset, stageLength);
        m_fftConvolver = new FFTConvolver(fftSize);
    } else {
        m_directKernel.SetLength(fftSize / 2);
        PodCopy(m_directKernel.Elements(), impulseResponse + stageOffset, fftSize / 2);
        m_directConvolver = new DirectConvolver(renderSliceSize);
    }
    m_temporaryBuffer.SetLength(renderSliceSize);
    PodZero(m_temporaryBuffer.Elements(), m_temporaryBuffer.Length());

    // The convolution stage at offset stageOffset needs to have a corresponding delay to cancel out the offset.
    size_t totalDelay = stageOffset + reverbTotalLatency;

    // But, the FFT convolution itself incurs fftSize / 2 latency, so subtract this out...
    size_t halfSize = fftSize / 2;
    if (!m_directMode) {
        MOZ_ASSERT(totalDelay >= halfSize);
        if (totalDelay >= halfSize)
            totalDelay -= halfSize;
    }

    // We divide up the total delay, into pre and post delay sections so that we can schedule at exactly the moment when the FFT will happen.
    // This is coordinated with the other stages, so they don't all do their FFTs at the same time...
    int maxPreDelayLength = std::min(halfSize, totalDelay);
    m_preDelayLength = totalDelay > 0 ? renderPhase % maxPreDelayLength : 0;
    if (m_preDelayLength > totalDelay)
        m_preDelayLength = 0;

    m_postDelayLength = totalDelay - m_preDelayLength;
    m_preReadWriteIndex = 0;
    m_framesProcessed = 0; // total frames processed so far

    size_t delayBufferSize = m_preDelayLength < fftSize ? fftSize : m_preDelayLength;
    delayBufferSize = delayBufferSize < renderSliceSize ? renderSliceSize : delayBufferSize;
    m_preDelayBuffer.SetLength(delayBufferSize);
    PodZero(m_preDelayBuffer.Elements(), m_preDelayBuffer.Length());
}
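A worked check of the pre/post delay split above, with assumed inputs (FFT mode, so fftSize / 2 of latency is subtracted first):

#include <algorithm>
#include <cassert>
#include <cstddef>

int main() {
  // Assumed example values, not taken from a real reverb graph.
  size_t fftSize = 512, stageOffset = 3000, reverbTotalLatency = 128;
  size_t renderPhase = 384;
  size_t halfSize = fftSize / 2;
  size_t totalDelay = stageOffset + reverbTotalLatency - halfSize;  // FFT mode
  size_t maxPreDelayLength = std::min(halfSize, totalDelay);
  size_t preDelayLength = totalDelay > 0 ? renderPhase % maxPreDelayLength : 0;
  if (preDelayLength > totalDelay) {
    preDelayLength = 0;
  }
  assert(preDelayLength == 128);
  assert(totalDelay - preDelayLength == 2744);  // postDelayLength
  return 0;
}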
Code example #12
void
AppleATDecoder::SetupDecoder()
{
  AudioStreamBasicDescription inputFormat, outputFormat;
  // Fill in the input format description from the stream.
  AppleUtils::GetProperty(mStream,
      kAudioFileStreamProperty_DataFormat, &inputFormat);

  // Fill in the output format manually.
  PodZero(&outputFormat);
  outputFormat.mFormatID = kAudioFormatLinearPCM;
  outputFormat.mSampleRate = inputFormat.mSampleRate;
  outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
#if defined(MOZ_SAMPLE_TYPE_FLOAT32)
  outputFormat.mBitsPerChannel = 32;
  outputFormat.mFormatFlags =
    kLinearPCMFormatFlagIsFloat |
    0;
#else
# error Unknown audio sample type
#endif
  // Set up the decoder so it gives us one sample per frame
  outputFormat.mFramesPerPacket = 1;
  outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame
        = outputFormat.mChannelsPerFrame * outputFormat.mBitsPerChannel / 8;

  OSStatus rv = AudioConverterNew(&inputFormat, &outputFormat, &mConverter);
  if (rv) {
    LOG("Error %d constructing AudioConverter", rv);
    mConverter = nullptr;
    mCallback->Error();
  }
  mHaveOutput = false;
}
Code example #13
File: VorbisDecoder.cpp Project: Shaif95/gecko-dev
VorbisDataDecoder::VorbisDataDecoder(const AudioInfo& aConfig,
                                     FlushableTaskQueue* aTaskQueue,
                                     MediaDataDecoderCallback* aCallback)
  : mInfo(aConfig)
  , mTaskQueue(aTaskQueue)
  , mCallback(aCallback)
  , mPacketCount(0)
  , mFrames(0)
{
  // Zero these member vars to avoid crashes in Vorbis clear functions when
  // destructor is called before |Init|.
  PodZero(&mVorbisBlock);
  PodZero(&mVorbisDsp);
  PodZero(&mVorbisInfo);
  PodZero(&mVorbisComment);
}
Code example #14
void
AudioNodeStream::AccumulateInputChunk(uint32_t aInputIndex,
                                      const AudioBlock& aChunk,
                                      AudioBlock* aBlock,
                                      nsTArray<float>* aDownmixBuffer)
{
  nsAutoTArray<const float*,GUESS_AUDIO_CHANNELS> channels;
  UpMixDownMixChunk(&aChunk, aBlock->ChannelCount(), channels, *aDownmixBuffer);

  for (uint32_t c = 0; c < channels.Length(); ++c) {
    const float* inputData = static_cast<const float*>(channels[c]);
    float* outputData = aBlock->ChannelFloatsForWrite(c);
    if (inputData) {
      if (aInputIndex == 0) {
        AudioBlockCopyChannelWithScale(inputData, aChunk.mVolume, outputData);
      } else {
        AudioBlockAddChannelWithScale(inputData, aChunk.mVolume, outputData);
      }
    } else {
      if (aInputIndex == 0) {
        PodZero(outputData, WEBAUDIO_BLOCK_SIZE);
      }
    }
  }
}
Code example #15
mozilla::ipc::IPCResult CompositorManagerParent::RecvReportMemory(
    ReportMemoryResolver&& aResolver) {
  MOZ_ASSERT(CompositorThreadHolder::IsInCompositorThread());
  MemoryReport aggregate;
  PodZero(&aggregate);

  // Accumulate RenderBackend usage.
  nsTArray<PCompositorBridgeParent*> compositorBridges;
  ManagedPCompositorBridgeParent(compositorBridges);
  for (auto bridge : compositorBridges) {
    static_cast<CompositorBridgeParentBase*>(bridge)->AccumulateMemoryReport(
        &aggregate);
  }

  // Accumulate Renderer usage asynchronously, and resolve.
  //
  // Note that the IPDL machinery requires aResolver to be called on this
  // thread, so we can't just pass it over to the renderer thread. We use
  // an intermediate MozPromise instead.
  wr::RenderThread::AccumulateMemoryReport(aggregate)->Then(
      CompositorThreadHolder::Loop()->SerialEventTarget(), __func__,
      [resolver = std::move(aResolver)](MemoryReport aReport) {
        resolver(aReport);
      },
      [](bool) {
        MOZ_ASSERT_UNREACHABLE("MemoryReport promises are never rejected");
      });

  return IPC_OK();
}
Code example #16
File: AOMDecoder.cpp Project: bgrins/gecko-dev
AOMDecoder::AOMDecoder(const CreateDecoderParams& aParams)
  : mImageContainer(aParams.mImageContainer)
  , mTaskQueue(aParams.mTaskQueue)
  , mInfo(aParams.VideoConfig())
{
  PodZero(&mCodec);
}
Code example #17
File: DelayBuffer.cpp Project: Andrel322/gecko-dev
void
DelayBuffer::ReadChannels(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE],
                          const AudioChunk* aOutputChunk,
                          uint32_t aFirstChannel, uint32_t aNumChannelsToRead,
                          ChannelInterpretation aChannelInterpretation)
{
  uint32_t totalChannelCount = aOutputChunk->mChannelData.Length();
  uint32_t readChannelsEnd = aFirstChannel + aNumChannelsToRead;
  MOZ_ASSERT(readChannelsEnd <= totalChannelCount);

  if (mUpmixChannels.Length() != totalChannelCount) {
    mLastReadChunk = -1; // invalidate cache
  }

  float* const* outputChannels = reinterpret_cast<float* const*>
    (const_cast<void* const*>(aOutputChunk->mChannelData.Elements()));
  for (uint32_t channel = aFirstChannel;
       channel < readChannelsEnd; ++channel) {
    PodZero(outputChannels[channel], WEBAUDIO_BLOCK_SIZE);
  }

  for (unsigned i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
    double currentDelay = aPerFrameDelays[i];
    MOZ_ASSERT(currentDelay >= 0.0);
    MOZ_ASSERT(currentDelay <= (mChunks.Length() - 1) * WEBAUDIO_BLOCK_SIZE);

    // Interpolate two input frames in case the read position does not match
    // an integer index.
    // Use the larger delay, for the older frame, first, as this is more
    // likely to use the cached upmixed channel arrays.
    int floorDelay = int(currentDelay);
    double interpolationFactor = currentDelay - floorDelay;
    int positions[2];
    positions[1] = PositionForDelay(floorDelay) + i;
    positions[0] = positions[1] - 1;

    for (unsigned tick = 0; tick < ArrayLength(positions); ++tick) {
      int readChunk = ChunkForPosition(positions[tick]);
      // mVolume is not set on default initialized chunks so handle null
      // chunks specially.
      if (!mChunks[readChunk].IsNull()) {
        int readOffset = OffsetForPosition(positions[tick]);
        UpdateUpmixChannels(readChunk, totalChannelCount,
                            aChannelInterpretation);
        double multiplier = interpolationFactor * mChunks[readChunk].mVolume;
        for (uint32_t channel = aFirstChannel;
             channel < readChannelsEnd; ++channel) {
          outputChannels[channel][i] += multiplier *
            static_cast<const float*>(mUpmixChannels[channel])[readOffset];
        }
      }

      interpolationFactor = 1.0 - interpolationFactor;
    }
  }
}
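The inner loop is a two-tap linear interpolator for fractional delays: with frac = currentDelay - floor(currentDelay), the older sample (positions[0]) is weighted by frac and the newer sample (positions[1]) by 1 - frac, so an integer delay reads a single sample exactly. A condensed sketch with hypothetical names (assumes the history buffer reaches far enough back, and omits the mVolume scaling):

static float FractionalDelayTap(const float* history, int i, double delay) {
  int floorDelay = int(delay);
  double frac = delay - floorDelay;           // interpolationFactor
  float older = history[i - floorDelay - 1];  // positions[0]
  float newer = history[i - floorDelay];      // positions[1]
  return float(frac * older + (1.0 - frac) * newer);
}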
Code example #18
/**
 * Copies the data in aInput to aOffsetInBlock within aBlock.
 * aBlock must have been allocated with AllocateInputBlock and have a channel
 * count that's a superset of the channels in aInput.
 */
static void
CopyChunkToBlock(const AudioChunk& aInput, AudioChunk *aBlock,
                 uint32_t aOffsetInBlock)
{
  uint32_t blockChannels = aBlock->ChannelCount();
  nsAutoTArray<const void*,2> channels;
  if (aInput.IsNull()) {
    channels.SetLength(blockChannels);
    PodZero(channels.Elements(), blockChannels);
  } else {
    channels.SetLength(aInput.ChannelCount());
    PodCopy(channels.Elements(), aInput.mChannelData.Elements(), channels.Length());
    if (channels.Length() != blockChannels) {
      // We only need to upmix here because aBlock's channel count has been
      // chosen to be a superset of the channel count of every chunk.
      AudioChannelsUpMix(&channels, blockChannels, nullptr);
    }
  }

  uint32_t duration = aInput.GetDuration();
  for (uint32_t c = 0; c < blockChannels; ++c) {
    float* outputData =
      static_cast<float*>(const_cast<void*>(aBlock->mChannelData[c])) + aOffsetInBlock;
    if (channels[c]) {
      switch (aInput.mBufferFormat) {
      case AUDIO_FORMAT_FLOAT32:
        ConvertAudioSamplesWithScale(
            static_cast<const float*>(channels[c]), outputData, duration,
            aInput.mVolume);
        break;
      case AUDIO_FORMAT_S16:
        ConvertAudioSamplesWithScale(
            static_cast<const int16_t*>(channels[c]), outputData, duration,
            aInput.mVolume);
        break;
      default:
        NS_ERROR("Unhandled format");
      }
    } else {
      PodZero(outputData, duration);
    }
  }
}
Code example #19
File: H264.cpp Project: AtulKumar2/gecko-dev
SPSData::SPSData()
{
  PodZero(this);
  // Default values when they aren't defined as per ITU-T H.264 (2014/02).
  chroma_format_idc = 1;
  video_format = 5;
  colour_primaries = 2;
  transfer_characteristics = 2;
  sample_ratio = 1.0;
}
Code example #20
void ReverbConvolverStage::reset()
{
    if (!m_directMode)
        m_fftConvolver->reset();
    else
        m_directConvolver->reset();
    PodZero(m_preDelayBuffer.Elements(), m_preDelayBuffer.Length());
    m_accumulationReadIndex = 0;
    m_inputReadIndex = 0;
    m_framesProcessed = 0;
}
Code example #21
File: AudioNodeEngine.cpp Project: volkanh/gecko-dev
void
WriteZeroesToAudioBlock(AudioChunk* aChunk, uint32_t aStart, uint32_t aLength)
{
  MOZ_ASSERT(aStart + aLength <= WEBAUDIO_BLOCK_SIZE);
  MOZ_ASSERT(!aChunk->IsNull(), "You should pass a non-null chunk");
  if (aLength == 0)
    return;

  for (uint32_t i = 0; i < aChunk->mChannelData.Length(); ++i) {
    PodZero(aChunk->ChannelFloatsForWrite(i) + aStart, aLength);
  }
}
Code example #22
File: KeyframeUtils.cpp Project: Pike/gecko-dev
uint32_t SubpropertyCount(nsCSSProperty aProperty) const
{
  if (!mSubpropertyCountInitialized) {
    PodZero(&mSubpropertyCount);
    mSubpropertyCountInitialized = true;
  }
  if (mSubpropertyCount[aProperty] == 0) {
    uint32_t count = 0;
    CSSPROPS_FOR_SHORTHAND_SUBPROPERTIES(
        p, aProperty, CSSEnabledState::eForAllContent) {
      ++count;
    }
    mSubpropertyCount[aProperty] = count;
  }
  return mSubpropertyCount[aProperty];
}
Code example #23
void
CompositorD3D11::EndFrame()
{
  nsIntSize oldSize = mSize;
  EnsureSize();
  UINT presentInterval = 0;

  if (gfxWindowsPlatform::GetPlatform()->IsWARP()) {
    // When we're using WARP we cannot present immediately as it causes us
    // to tear when rendering. When not using WARP it appears the DWM takes
    // care of tearing for us.
    presentInterval = 1;
  }

  if (oldSize == mSize) {
    RefPtr<IDXGISwapChain1> chain;
    HRESULT hr = mSwapChain->QueryInterface((IDXGISwapChain1**)byRef(chain));
    if (SUCCEEDED(hr) && chain) {
      DXGI_PRESENT_PARAMETERS params;
      PodZero(&params);
      params.DirtyRectsCount = mInvalidRegion.GetNumRects();
      std::vector<RECT> rects;
      rects.reserve(params.DirtyRectsCount);

      nsIntRegionRectIterator iter(mInvalidRegion);
      const nsIntRect* r;
      while ((r = iter.Next()) != nullptr) {
        RECT rect;
        rect.left = r->x;
        rect.top = r->y;
        rect.bottom = r->YMost();
        rect.right = r->XMost();

        rects.push_back(rect);
      }

      params.pDirtyRects = &rects.front();
      chain->Present1(presentInterval, mDisableSequenceForNextFrame ? DXGI_PRESENT_DO_NOT_SEQUENCE : 0, &params);
    } else {
      mSwapChain->Present(presentInterval, mDisableSequenceForNextFrame ? DXGI_PRESENT_DO_NOT_SEQUENCE : 0);
    }
    mDisableSequenceForNextFrame = false;
    if (mTarget) {
      PaintToTarget();
    }
  }

  mCurrentRT = nullptr;
}
Code example #24
void
DelayBuffer::ReadChannel(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE],
                         AudioBlock* aOutputChunk, uint32_t aChannel,
                         ChannelInterpretation aChannelInterpretation)
{
  if (!mChunks.Length()) {
    float* outputChannel = aOutputChunk->ChannelFloatsForWrite(aChannel);
    PodZero(outputChannel, WEBAUDIO_BLOCK_SIZE);
    return;
  }

  ReadChannels(aPerFrameDelays, aOutputChunk,
               aChannel, 1, aChannelInterpretation);
}
Code example #25
void
DeviceManagerDx::CreateWARPCompositorDevice()
{
  ScopedGfxFeatureReporter reporterWARP("D3D11-WARP", gfxPrefs::LayersD3D11ForceWARP());
  FeatureState& d3d11 = gfxConfig::GetFeature(Feature::D3D11_COMPOSITING);

  HRESULT hr;
  RefPtr<ID3D11Device> device;

  // Use D3D11_CREATE_DEVICE_PREVENT_INTERNAL_THREADING_OPTIMIZATIONS
  // to prevent bug 1092260. IE 11 also uses this flag.
  UINT flags = D3D11_CREATE_DEVICE_BGRA_SUPPORT;
  if (!CreateDevice(nullptr, D3D_DRIVER_TYPE_WARP, flags, hr, device)) {
    gfxCriticalError() << "Exception occurred initializing WARP D3D11 device!";
    d3d11.SetFailed(FeatureStatus::CrashedInHandler, "Crashed creating a D3D11 WARP device",
                     NS_LITERAL_CSTRING("FEATURE_FAILURE_D3D11_WARP_DEVICE"));
  }

  if (FAILED(hr) || !device) {
    // This should always succeed... in theory.
    gfxCriticalError() << "Failed to initialize WARP D3D11 device! " << hexa(hr);
    d3d11.SetFailed(FeatureStatus::Failed, "Failed to create a D3D11 WARP device",
                    NS_LITERAL_CSTRING("FEATURE_FAILURE_D3D11_WARP_DEVICE2"));
    return;
  }

  // Only test for texture sharing on Windows 8 since it puts the device into
  // an unusable state if used on Windows 7
  bool textureSharingWorks = false;
  if (IsWin8OrLater()) {
    textureSharingWorks = D3D11Checks::DoesTextureSharingWork(device);
  }

  DxgiAdapterDesc nullAdapter;
  PodZero(&nullAdapter);

  int featureLevel = device->GetFeatureLevel();
  {
    MutexAutoLock lock(mDeviceLock);
    mCompositorDevice = device;
    mDeviceStatus = Some(D3D11DeviceStatus(
      true,
      textureSharingWorks,
      featureLevel,
      nullAdapter));
  }
  mCompositorDevice->SetExceptionMode(0);

  reporterWARP.SetSuccessful();
}
Code example #26
File: DelayBuffer.cpp Project: Andrel322/gecko-dev
void
DelayBuffer::ReadChannel(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE],
                         const AudioChunk* aOutputChunk, uint32_t aChannel,
                         ChannelInterpretation aChannelInterpretation)
{
  if (!mChunks.Length()) {
    float* outputChannel = static_cast<float*>
      (const_cast<void*>(aOutputChunk->mChannelData[aChannel]));
    PodZero(outputChannel, WEBAUDIO_BLOCK_SIZE);
    return;
  }

  ReadChannels(aPerFrameDelays, aOutputChunk,
               aChannel, 1, aChannelInterpretation);
}
Code example #27
/**
 * Copies the data in aInput to aOffsetInBlock within aBlock. All samples must
 * be float. Both chunks must have the same number of channels (or else
 * aInput is null). aBlock must have been allocated with AllocateInputBlock.
 */
static void
CopyChunkToBlock(const AudioChunk& aInput, AudioChunk *aBlock, uint32_t aOffsetInBlock)
{
  uint32_t d = aInput.GetDuration();
  for (uint32_t i = 0; i < aBlock->mChannelData.Length(); ++i) {
    float* out = static_cast<float*>(const_cast<void*>(aBlock->mChannelData[i])) +
      aOffsetInBlock;
    if (aInput.IsNull()) {
      PodZero(out, d);
    } else {
      const float* in = static_cast<const float*>(aInput.mChannelData[i]);
      ConvertAudioSamplesWithScale(in, out, d, aInput.mVolume);
    }
  }
}
Code example #28
File: ConvolverNode.cpp Project: stormandsun/firefox
void
ConvolverNode::SetBuffer(JSContext* aCx, AudioBuffer* aBuffer, ErrorResult& aRv)
{
    if (aBuffer) {
        switch (aBuffer->NumberOfChannels()) {
        case 1:
        case 2:
        case 4:
            // Supported number of channels
            break;
        default:
            aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
            return;
        }
    }

    mBuffer = aBuffer;

    // Send the buffer to the stream
    AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
    MOZ_ASSERT(ns, "Why don't we have a stream here?");
    if (mBuffer) {
        uint32_t length = mBuffer->Length();
        nsRefPtr<ThreadSharedFloatArrayBufferList> data =
            mBuffer->GetThreadSharedChannelsForRate(aCx);
        if (data && length < WEBAUDIO_BLOCK_SIZE) {
            // For very small impulse response buffers, we need to pad the
            // buffer with 0 to make sure that the Reverb implementation
            // has enough data to compute FFTs from.
            length = WEBAUDIO_BLOCK_SIZE;
            nsRefPtr<ThreadSharedFloatArrayBufferList> paddedBuffer =
                new ThreadSharedFloatArrayBufferList(data->GetChannels());
            float* channelData = (float*) malloc(sizeof(float) * length * data->GetChannels());
            for (uint32_t i = 0; i < data->GetChannels(); ++i) {
                PodCopy(channelData + length * i, data->GetData(i), mBuffer->Length());
                PodZero(channelData + length * i + mBuffer->Length(), WEBAUDIO_BLOCK_SIZE - mBuffer->Length());
                paddedBuffer->SetData(i, (i == 0) ? channelData : nullptr, free, channelData);
            }
            data = paddedBuffer;
        }
        SendInt32ParameterToStream(ConvolverNodeEngine::BUFFER_LENGTH, length);
        SendDoubleParameterToStream(ConvolverNodeEngine::SAMPLE_RATE,
                                    mBuffer->SampleRate());
        ns->SetBuffer(data.forget());
    } else {
        ns->SetBuffer(nullptr);
    }
}
Code example #29
File: D3D11Checks.cpp Project: yrliou/gecko-dev
/* static */ void
D3D11Checks::WarnOnAdapterMismatch(ID3D11Device *device)
{
  DXGI_ADAPTER_DESC desc;
  PodZero(&desc);
  GetDxgiDesc(device, &desc);

  nsCOMPtr<nsIGfxInfo> gfxInfo = services::GetGfxInfo();
  nsString vendorID;
  gfxInfo->GetAdapterVendorID(vendorID);
  nsresult ec;
  int32_t vendor = vendorID.ToInteger(&ec, 16);
  if (vendor != desc.VendorId) {
    gfxCriticalNote << "VendorIDMismatch V " << hexa(vendor) << " " << hexa(desc.VendorId);
  }
}
Code example #30
void
SpeechStreamListener::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
                                               TrackID aID,
                                               StreamTime aTrackOffset,
                                               uint32_t aTrackEvents,
                                               const MediaSegment& aQueuedMedia,
                                               MediaStream* aInputStream,
                                               TrackID aInputTrackID)
{
  AudioSegment* audio = const_cast<AudioSegment*>(
    static_cast<const AudioSegment*>(&aQueuedMedia));

  AudioSegment::ChunkIterator iterator(*audio);
  while (!iterator.IsEnded()) {
    // Skip over-large chunks so we don't crash! Advance the iterator
    // before continuing, or this loop would never terminate.
    if (iterator->GetDuration() > INT_MAX) {
      iterator.Next();
      continue;
    }
    int duration = int(iterator->GetDuration());

    if (iterator->IsNull()) {
      nsTArray<int16_t> nullData;
      PodZero(nullData.AppendElements(duration), duration);
      ConvertAndDispatchAudioChunk(duration, iterator->mVolume,
                                   nullData.Elements(), aGraph->GraphRate());
    } else {
      AudioSampleFormat format = iterator->mBufferFormat;

      MOZ_ASSERT(format == AUDIO_FORMAT_S16 || format == AUDIO_FORMAT_FLOAT32);

      if (format == AUDIO_FORMAT_S16) {
        ConvertAndDispatchAudioChunk(duration, iterator->mVolume,
                                     static_cast<const int16_t*>(iterator->mChannelData[0]),
                                     aGraph->GraphRate());
      } else if (format == AUDIO_FORMAT_FLOAT32) {
        ConvertAndDispatchAudioChunk(duration, iterator->mVolume,
                                     static_cast<const float*>(iterator->mChannelData[0]),
                                     aGraph->GraphRate());
      }
    }

    iterator.Next();
  }
}