Example #1
void
AudioBuffer::MixToMono(JSContext* aJSContext)
{
  if (mJSChannels.Length() == 1) {
    // The buffer is already mono
    return;
  }

  // Prepare the input channels
  nsAutoTArray<const void*, GUESS_AUDIO_CHANNELS> channels;
  channels.SetLength(mJSChannels.Length());
  for (uint32_t i = 0; i < mJSChannels.Length(); ++i) {
    channels[i] = JS_GetFloat32ArrayData(mJSChannels[i]);
  }

  // Prepare the output channels
  float* downmixBuffer = new float[mLength];

  // Perform the down-mix
  AudioChannelsDownMix(channels, &downmixBuffer, 1, mLength);

  // Truncate the shared channels and copy the downmixed data over
  mJSChannels.SetLength(1);
  SetRawChannelContents(aJSContext, 0, downmixBuffer);
  delete[] downmixBuffer;
}
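
Every example in this listing drives the same helper. Its declaration is not part of the listing, but the call sites here (read-only channel pointers in, caller-allocated float output buffers out) suggest a shape along these lines for the float-based callers:

// Hypothetical declaration inferred from the call sites in this listing;
// the real one lives in Gecko's AudioChannelFormat.h. aChannelData entries
// must point at float data, and aOutputChannels must already hold
// aOutputChannelCount buffers of aDuration samples each.
void AudioChannelsDownMix(const nsTArray<const void*>& aChannelData,
                          float** aOutputChannels,
                          uint32_t aOutputChannelCount,
                          uint32_t aDuration);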
Example #2
template<typename T>
void TestDownmixStereo() {
  const size_t arraySize = 1024;
  nsTArray<const T*> inputptr;
  nsTArray<T*> input;
  T** output;

  output = new T*[1];
  output[0] = new T[arraySize];

  input.SetLength(2);
  inputptr.SetLength(2);

  for (size_t channel = 0; channel < input.Length(); channel++) {
    input[channel] = new T[arraySize];
    for (size_t i = 0; i < arraySize; i++) {
      input[channel][i] = channel == 0 ? GetLowValue<T>() : GetHighValue<T>();
    }
    inputptr[channel] = input[channel];
  }

  AudioChannelsDownMix(inputptr, output, 1, arraySize);

  // Opposite low/high inputs must average to silence in the mono output.
  for (size_t i = 0; i < arraySize; i++) {
    ASSERT_TRUE(output[0][i] == GetSilentValue<T>());
  }

  for (size_t channel = 0; channel < input.Length(); channel++) {
    delete[] input[channel];
  }
  delete[] output[0];
  delete[] output;
}
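
The test relies on three value helpers that are not shown in this listing. A minimal sketch of plausible float specializations, assuming GetLowValue and GetHighValue are opposite extremes whose equal-weight average is the silent value:

// Hypothetical helpers consistent with the assertion above: an
// equal-weight down-mix of -1 and +1 is 0, i.e. silence.
template<typename T> T GetLowValue();
template<typename T> T GetHighValue();
template<typename T> T GetSilentValue();

template<> float GetLowValue<float>()    { return -1.0f; }
template<> float GetHighValue<float>()   { return  1.0f; }
template<> float GetSilentValue<float>() { return  0.0f; }

The test would then be instantiated once per sample type under test, e.g. TestDownmixStereo<float>().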
Example #3
void
DownmixAndInterleave(const nsTArray<const void*>& aChannelData,
                     AudioSampleFormat aSourceFormat, int32_t aDuration,
                     float aVolume, uint32_t aOutputChannels,
                     AudioDataValue* aOutput)
{
  nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channelData;
  nsAutoTArray<float,AUDIO_PROCESSING_FRAMES*GUESS_AUDIO_CHANNELS> downmixConversionBuffer;
  nsAutoTArray<float,AUDIO_PROCESSING_FRAMES*GUESS_AUDIO_CHANNELS> downmixOutputBuffer;

  channelData.SetLength(aChannelData.Length());
  if (aSourceFormat != AUDIO_FORMAT_FLOAT32) {
    NS_ASSERTION(aSourceFormat == AUDIO_FORMAT_S16, "unknown format");
    downmixConversionBuffer.SetLength(aDuration*aChannelData.Length());
    for (uint32_t i = 0; i < aChannelData.Length(); ++i) {
      float* conversionBuf = downmixConversionBuffer.Elements() + (i*aDuration);
      const int16_t* sourceBuf = static_cast<const int16_t*>(aChannelData[i]);
      for (uint32_t j = 0; j < (uint32_t)aDuration; ++j) {
        conversionBuf[j] = AudioSampleToFloat(sourceBuf[j]);
      }
      channelData[i] = conversionBuf;
    }
  } else {
    for (uint32_t i = 0; i < aChannelData.Length(); ++i) {
      channelData[i] = aChannelData[i];
    }
  }

  downmixOutputBuffer.SetLength(aDuration*aOutputChannels);
  nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannelBuffers;
  nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> outputChannelData;
  outputChannelBuffers.SetLength(aOutputChannels);
  outputChannelData.SetLength(aOutputChannels);
  for (uint32_t i = 0; i < (uint32_t)aOutputChannels; ++i) {
    outputChannelData[i] = outputChannelBuffers[i] =
        downmixOutputBuffer.Elements() + aDuration*i;
  }
  if (channelData.Length() > aOutputChannels) {
    AudioChannelsDownMix(channelData, outputChannelBuffers.Elements(),
                         aOutputChannels, aDuration);
  }
  InterleaveAndConvertBuffer(outputChannelData.Elements(), AUDIO_FORMAT_FLOAT32,
                             aDuration, aVolume, aOutputChannels, aOutput);
}
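
The S16 branch above converts every sample to float before the down-mix, because the down-mix only operates on float data. A minimal sketch of the conversion, assuming AudioSampleToFloat normalizes signed 16-bit PCM into [-1, 1) by dividing by 2^15:

// Sketch of the conversion assumed above; this is the conventional
// normalization of 16-bit PCM to floating point.
inline float
AudioSampleToFloat(int16_t aValue)
{
  return aValue / 32768.0f;
}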
Example #4
void
AudioNodeStream::UpMixDownMixChunk(const AudioChunk* aChunk,
                                   uint32_t aOutputChannelCount,
                                   nsTArray<const void*>& aOutputChannels,
                                   nsTArray<float>& aDownmixBuffer)
{
  static const float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};

  aOutputChannels.AppendElements(aChunk->mChannelData);
  if (aOutputChannels.Length() < aOutputChannelCount) {
    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
      AudioChannelsUpMix(&aOutputChannels, aOutputChannelCount, nullptr);
      NS_ASSERTION(aOutputChannelCount == aOutputChannels.Length(),
                   "We called GetAudioChannelsSuperset to avoid this");
    } else {
      // Fill the remaining aOutputChannels with silence
      for (uint32_t j = aOutputChannels.Length(); j < aOutputChannelCount; ++j) {
        aOutputChannels.AppendElement(silenceChannel);
      }
    }
  } else if (aOutputChannels.Length() > aOutputChannelCount) {
    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
      nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
      outputChannels.SetLength(aOutputChannelCount);
      aDownmixBuffer.SetLength(aOutputChannelCount * WEBAUDIO_BLOCK_SIZE);
      for (uint32_t j = 0; j < aOutputChannelCount; ++j) {
        outputChannels[j] = &aDownmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
      }

      AudioChannelsDownMix(aOutputChannels, outputChannels.Elements(),
                           aOutputChannelCount, WEBAUDIO_BLOCK_SIZE);

      aOutputChannels.SetLength(aOutputChannelCount);
      for (uint32_t j = 0; j < aOutputChannels.Length(); ++j) {
        aOutputChannels[j] = outputChannels[j];
      }
    } else {
      // Drop the excess channels beyond aOutputChannelCount
      aOutputChannels.RemoveElementsAt(aOutputChannelCount,
        aOutputChannels.Length() - aOutputChannelCount);
    }
  }
}
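
Note that the Speakers up-mix branch passes nullptr as the zero channel, so the helper may fill the new slots with null pointers; Example #5 below copes with exactly that by treating a null inputData pointer as silence. The call sites here and in the examples below suggest a declaration along these lines:

// Hypothetical declaration inferred from the calls in this listing:
// grows aChannelData to aChannelCount entries, filling the new slots
// with aZeroChannel (which may be nullptr, meaning a silent channel).
void AudioChannelsUpMix(nsTArray<const void*>* aChannelData,
                        uint32_t aChannelCount,
                        const void* aZeroChannel);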
Example #5
void
AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsFinishedOnGraphThread() ||
        a->IsAudioParamStream()) {
      continue;
    }
    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }

  switch (mChannelCountMode) {
  case ChannelCountMode::Explicit:
    // Disregard the output channel count that we've calculated, and just use
    // mNumberOfInputChannels.
    outputChannelCount = mNumberOfInputChannels;
    break;
  case ChannelCountMode::Clamped_max:
    // Clamp the computed output channel count to mNumberOfInputChannels.
    outputChannelCount = std::min(outputChannelCount, mNumberOfInputChannels);
    break;
  case ChannelCountMode::Max:
    // Nothing to do here; this case exists only to silence the compiler warning.
    break;
  }

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  if (inputChunkCount == 1 &&
      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }

  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
  float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AudioChunk* chunk = inputChunks[i];
    nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
    channels.AppendElements(chunk->mChannelData);
    if (channels.Length() < outputChannelCount) {
      if (mChannelInterpretation == ChannelInterpretation::Speakers) {
        AudioChannelsUpMix(&channels, outputChannelCount, nullptr);
        NS_ASSERTION(outputChannelCount == channels.Length(),
                     "We called GetAudioChannelsSuperset to avoid this");
      } else {
        // Fill the remaining channels with silence
        for (uint32_t j = channels.Length(); j < outputChannelCount; ++j) {
          channels.AppendElement(silenceChannel);
        }
      }
    } else if (channels.Length() > outputChannelCount) {
      if (mChannelInterpretation == ChannelInterpretation::Speakers) {
        nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
        outputChannels.SetLength(outputChannelCount);
        downmixBuffer.SetLength(outputChannelCount * WEBAUDIO_BLOCK_SIZE);
        for (uint32_t j = 0; j < outputChannelCount; ++j) {
          outputChannels[j] = &downmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
        }

        AudioChannelsDownMix(channels, outputChannels.Elements(),
                             outputChannelCount, WEBAUDIO_BLOCK_SIZE);

        channels.SetLength(outputChannelCount);
        for (uint32_t j = 0; j < channels.Length(); ++j) {
          channels[j] = outputChannels[j];
        }
      } else {
        // Drop the excess channels beyond outputChannelCount
        channels.RemoveElementsAt(outputChannelCount,
                                  channels.Length() - outputChannelCount);
      }
    }

    for (uint32_t c = 0; c < channels.Length(); ++c) {
      const float* inputData = static_cast<const float*>(channels[c]);
      float* outputData = static_cast<float*>(const_cast<void*>(aTmpChunk.mChannelData[c]));
      if (inputData) {
        if (i == 0) {
          AudioBlockCopyChannelWithScale(inputData, chunk->mVolume, outputData);
        } else {
          AudioBlockAddChannelWithScale(inputData, chunk->mVolume, outputData);
        }
      } else {
        if (i == 0) {
          memset(outputData, 0, WEBAUDIO_BLOCK_SIZE*sizeof(float));
        }
      }
    }
  }
}
Example #6
void
AudioNodeExternalInputStream::TrackMapEntry::ResampleInputData(AudioSegment* aSegment)
{
  AudioSegment::ChunkIterator ci(*aSegment);
  while (!ci.IsEnded()) {
    const AudioChunk& chunk = *ci;
    nsAutoTArray<const void*,2> channels;
    if (chunk.GetDuration() > UINT32_MAX) {
      // This will cause us to OOM or overflow below. So let's just bail.
      NS_ERROR("Chunk duration out of bounds");
      return;
    }
    uint32_t duration = uint32_t(chunk.GetDuration());

    if (chunk.IsNull()) {
      nsAutoTArray<AudioDataValue,1024> silence;
      silence.SetLength(duration);
      PodZero(silence.Elements(), silence.Length());
      channels.SetLength(mResamplerChannelCount);
      for (uint32_t i = 0; i < channels.Length(); ++i) {
        channels[i] = silence.Elements();
      }
      ResampleChannels(channels, duration, AUDIO_OUTPUT_FORMAT, 0.0f);
    } else if (chunk.mChannelData.Length() == mResamplerChannelCount) {
      // Common case, since mResamplerChannelCount is set to the first chunk's
      // number of channels.
      channels.AppendElements(chunk.mChannelData);
      ResampleChannels(channels, duration, chunk.mBufferFormat, chunk.mVolume);
    } else {
      // Uncommon case. Since downmixing requires channels to be floats,
      // convert everything to floats now.
      uint32_t upChannels = GetAudioChannelsSuperset(chunk.mChannelData.Length(), mResamplerChannelCount);
      nsTArray<float> buffer;
      if (chunk.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
        channels.AppendElements(chunk.mChannelData);
      } else {
        NS_ASSERTION(chunk.mBufferFormat == AUDIO_FORMAT_S16, "Unknown format");
        if (duration > UINT32_MAX/chunk.mChannelData.Length()) {
          NS_ERROR("Chunk duration out of bounds");
          return;
        }
        buffer.SetLength(chunk.mChannelData.Length()*duration);
        for (uint32_t i = 0; i < chunk.mChannelData.Length(); ++i) {
          const int16_t* samples = static_cast<const int16_t*>(chunk.mChannelData[i]);
          float* converted = &buffer[i*duration];
          for (uint32_t j = 0; j < duration; ++j) {
            converted[j] = AudioSampleToFloat(samples[j]);
          }
          channels.AppendElement(converted);
        }
      }
      nsTArray<float> zeroes;
      if (channels.Length() < upChannels) {
        zeroes.SetLength(duration);
        PodZero(zeroes.Elements(), zeroes.Length());
        AudioChannelsUpMix(&channels, upChannels, zeroes.Elements());
      }
      if (channels.Length() == mResamplerChannelCount) {
        ResampleChannels(channels, duration, AUDIO_FORMAT_FLOAT32, chunk.mVolume);
      } else {
        nsTArray<float> output;
        if (duration > UINT32_MAX/mResamplerChannelCount) {
          NS_ERROR("Chunk duration out of bounds");
          return;
        }
        output.SetLength(duration*mResamplerChannelCount);
        nsAutoTArray<float*,2> outputPtrs;
        nsAutoTArray<const void*,2> outputPtrsConst;
        for (uint32_t i = 0; i < mResamplerChannelCount; ++i) {
          outputPtrs.AppendElement(output.Elements() + i*duration);
          outputPtrsConst.AppendElement(outputPtrs[i]);
        }
        AudioChannelsDownMix(channels, outputPtrs.Elements(), outputPtrs.Length(), duration);
        ResampleChannels(outputPtrsConst, duration, AUDIO_FORMAT_FLOAT32, chunk.mVolume);
      }
    }
    ci.Next();
  }
}
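
Across these examples the mixing arithmetic itself stays hidden inside AudioChannelsDownMix. For the stereo-to-mono case exercised by Example #1 and by the test in Example #2, it conceptually reduces to an equal-weight average; a minimal sketch, not the real implementation (which applies per-speaker-layout mixing matrices):

// Minimal stand-in for the stereo-to-mono case only: average the two
// input channels into the single output channel.
static void
DownMixStereoToMono(const float* aLeft, const float* aRight,
                    float* aOutput, uint32_t aDuration)
{
  for (uint32_t i = 0; i < aDuration; ++i) {
    aOutput[i] = 0.5f * (aLeft[i] + aRight[i]);
  }
}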