Example #1
      NS_IMETHOD Run() override
      {

        auto engine =
          static_cast<ScriptProcessorNodeEngine*>(mStream->Engine());
        AudioChunk output;
        output.SetNull(engine->mBufferSize);
        {
          auto node = static_cast<ScriptProcessorNode*>
            (engine->NodeMainThread());
          if (!node) {
            return NS_OK;
          }

          if (node->HasListenersFor(nsGkAtoms::onaudioprocess)) {
            DispatchAudioProcessEvent(node, &output);
          }
          // The node may have been destroyed during event dispatch.
        }

        // Append it to our output buffer queue
        engine->GetSharedBuffers()->FinishProducingOutputBuffer(output);

        return NS_OK;
      }
template <typename T>
static void
CopyChunkToBlock(AudioChunk& aInput, AudioBlock *aBlock,
                 uint32_t aOffsetInBlock)
{
  uint32_t blockChannels = aBlock->ChannelCount();
  AutoTArray<const T*,2> channels;
  if (aInput.IsNull()) {
    channels.SetLength(blockChannels);
    PodZero(channels.Elements(), blockChannels);
  } else {
    const nsTArray<const T*>& inputChannels = aInput.ChannelData<T>();
    channels.SetLength(inputChannels.Length());
    PodCopy(channels.Elements(), inputChannels.Elements(), channels.Length());
    if (channels.Length() != blockChannels) {
      // We only need to upmix here because aBlock's channel count has been
      // chosen to be a superset of the channel count of every chunk.
      AudioChannelsUpMix(&channels, blockChannels, static_cast<T*>(nullptr));
    }
  }

  for (uint32_t c = 0; c < blockChannels; ++c) {
    float* outputData = aBlock->ChannelFloatsForWrite(c) + aOffsetInBlock;
    if (channels[c]) {
      ConvertAudioSamplesWithScale(channels[c], outputData, aInput.GetDuration(), aInput.mVolume);
    } else {
      PodZero(outputData, aInput.GetDuration());
    }
  }
}
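Below is a minimal, hypothetical sketch of the same pattern without the Gecko types (MiniChunk and CopyMiniChunkToBlock are illustrative names only, not part of the codebase): copy each input channel into a pre-sized block at an offset, scaling by the chunk volume, and zero-fill any channel that has no input data.

#include <cstddef>
#include <vector>

struct MiniChunk {                        // stand-in for AudioChunk (assumption)
  std::vector<const float*> channels;     // a null entry means a silent channel
  std::size_t duration = 0;
  float volume = 1.0f;
};

static void CopyMiniChunkToBlock(const MiniChunk& in,
                                 std::vector<std::vector<float>>& block,
                                 std::size_t offsetInBlock)
{
  for (std::size_t c = 0; c < block.size(); ++c) {
    float* out = block[c].data() + offsetInBlock;
    const float* src = c < in.channels.size() ? in.channels[c] : nullptr;
    for (std::size_t i = 0; i < in.duration; ++i) {
      out[i] = src ? src[i] * in.volume : 0.0f;  // scale-copy or zero-fill
    }
  }
}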
Example #3
void			PAudioBuffer::sendToNetwork()
{
  int						toReach;
  int						i;
  AudioChunk				*chunk;

  if (ABS(fRdIn - fWrIn) >= SOUNDBUFF_SIZE)
    {
		toReach = fRdIn + SOUNDBUFF_SIZE;
		if (toReach >= fMaxIn)
			toReach -= fMaxIn;
		i = 0;
		while (fRdIn != toReach)
		{
			_frameBuff[i++] = input[fRdIn++];
			if (fRdIn >= fMaxIn)
				fRdIn = 0;
		}
		chunk = _bridge.popUnused();
		if (chunk == NULL)
			return ;
		chunk->clean();
		//compressed = _codec->encode(_frameBuff, FRAME_PACKET_SIZE, encodedSize);
		//chunk->assign(_frameBuff, (FRAME_PACKET_SIZE * sizeof(float)));
		// Raw Mode
		chunk->assign(_frameBuff, SOUNDBUFF_SIZE);
		_bridge.inputPush(chunk);
	}
}
Example #4
void			PAudioBuffer::feed()
{
	SAMPLE			*frames;
	AudioChunk		*chunk;
	unsigned int	i = 0;

	chunk = _bridge.outputPop();
	if (chunk == NULL)
		return ;
	if (chunk->size() == 0)
		return ;
	frames = chunk->getContent();
	if (frames == NULL)
		return ;
	i = 0;
	while (i < chunk->size())
	{
		if (fWrOut >= fMaxOut)
			fWrOut = 0;
		output[fWrOut++] = frames[i++];
//		if (fWrOut == fRdOut)
//			return ;
	}
	_bridge.pushUnused(chunk);
}
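A hypothetical sketch of the ring-buffer indexing shared by sendToNetwork() and feed() above: a cursor walks a fixed-size array and wraps to zero when it reaches the end (RingCursor is an illustrative name, not from the codebase).

#include <cstddef>

template <std::size_t N>
struct RingCursor {
  std::size_t pos = 0;
  // Return the current slot and advance, wrapping at the end of the buffer,
  // mirroring `output[fWrOut++]; if (fWrOut >= fMaxOut) fWrOut = 0;`.
  std::size_t next() {
    std::size_t p = pos++;
    if (pos >= N) pos = 0;
    return p;
  }
};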
Example #5
			virtual void pull(AudioChunk &chunk)
			{
				if (!chunk.length())   return;
				if (!valid || finished) {chunk.silence(); return;}

				int samples, have = 0, need = chunk.length();

				//Create pointers to 16-bit data
				short *d16[PG_MAX_CHANNELS];
				for (Uint32 i = 0; i < chunk.format().channels; ++i)
					d16[i] = (short*) chunk.start(i);

				while (true)
				{
					samples = stb_vorbis_get_samples_short(ogg,
						chunk.format().channels, d16, (need-have));

					if (samples < 0)
					{
						finished = true;
						//cout << " VORBIS ERROR" << endl;
						break;
					}
					if (samples == 0)
					{
						//File's end
						if (loop)
						{
							stb_vorbis_seek_start(ogg);
							continue;
						}
						else
						{
							finished = true;
							break;
						}
					}

					for (Uint32 i=0; i < chunk.format().channels; ++i)
						d16[i] += samples;
					have += samples;
					//if (have > need) cout << "VORBIS OVERDRAW" << endl;

					//std::cout << "OGG pull: " << have << "/" << need << std::endl;

					if (have >= need) break;
				}

				//Cutoff marker if necessary
				if (have < need) chunk.cutoff(have);

				//Upsample data to 24-bit Sint32s
				for (Uint32 i=0; i < chunk.format().channels; ++i)
				{
					Sint32 *start = chunk.start(i), *op = start + have;
					short *ip = d16[i];
					while (op!=start) {*(--op) = 256 * Sint32(*(--ip));}
				}
			}
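A hypothetical standalone sketch of the in-place widening done at the end of pull(): the 16-bit samples were decoded into the front of the 32-bit buffer, so the expansion can run back-to-front without clobbering samples that have not been read yet, scaling by 256 to occupy a 24-bit range inside a Sint32 (WidenInPlace is an illustrative name).

#include <cstddef>
#include <cstdint>

static void WidenInPlace(int32_t* start, std::size_t count)
{
  // Shorts occupy the front half of the buffer; read them from the end.
  const int16_t* ip = reinterpret_cast<const int16_t*>(start) + count;
  int32_t* op = start + count;
  while (op != start) {
    *(--op) = 256 * static_cast<int32_t>(*(--ip));
  }
}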
Example #6
void
AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsAudioParamStream()) {
      continue;
    }

    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }

  outputChannelCount = ComputedNumberOfChannels(outputChannelCount);

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  if (inputChunkCount == 1 &&
      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }

  if (outputChannelCount == 0) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
  }
}
Example #7
  // Read audio data from aChunk, resample it if needed,
  // and then send the result to the OMX input buffer (or buffers, if one buffer is not enough).
  // aSamplesRead will be the number of samples that have been read from aChunk.
  BufferState ReadChunk(AudioChunk& aChunk, size_t* aSamplesRead)
  {
    size_t chunkSamples = aChunk.GetDuration();
    size_t bytesToCopy = chunkSamples * mOMXAEncoder.mResamplingRatio
                         * mOMXAEncoder.mChannels * sizeof(AudioDataValue);
    size_t bytesCopied = 0;
    if (bytesToCopy <= AvailableSize()) {
      if (aChunk.IsNull()) {
        bytesCopied = SendSilenceToBuffer(chunkSamples);
      } else {
        bytesCopied = SendChunkToBuffer(aChunk, chunkSamples);
      }
      UpdateAfterSendChunk(chunkSamples, bytesCopied, aSamplesRead);
    } else {
      // Interleave data to a temporary buffer.
      nsAutoTArray<AudioDataValue, 9600> pcm;
      pcm.SetLength(bytesToCopy);
      AudioDataValue* interleavedSource = pcm.Elements();
      AudioTrackEncoder::InterleaveTrackData(aChunk, chunkSamples,
                                             mOMXAEncoder.mChannels,
                                             interleavedSource);

      // When the data size of chunk is larger than the buffer capacity,
      // we split it into sub-chunks to fill up buffers.
      size_t subChunkSamples = 0;
      while (GetNextSubChunk(bytesToCopy, subChunkSamples)) {
        // To avoid enqueueing an empty buffer, we follow this order: clear the
        // filled buffer first, then create a new one, and send data to it last.
        if (!IsEmpty()) {
          // Submit the filled-up buffer and request a new buffer.
          status_t result = Enqueue(mOMXAEncoder.mTimestamp,
                                    mInputFlags & ~OMXCodecWrapper::BUFFER_EOS);
          if (result != OK) {
            return BUFFER_FAIL;
          }

          result = Dequeue();
          if (result == -EAGAIN) {
            return WAIT_FOR_NEW_BUFFER;
          }
          if (result != OK) {
            return BUFFER_FAIL;
          }
        }
        if (aChunk.IsNull()) {
          bytesCopied = SendSilenceToBuffer(subChunkSamples);
        } else {
          bytesCopied = SendInterleavedSubChunkToBuffer(interleavedSource, subChunkSamples);
        }
        UpdateAfterSendChunk(subChunkSamples, bytesCopied, aSamplesRead);
        // Move to the position of the samples not yet sent to the buffer.
        interleavedSource += subChunkSamples * mOMXAEncoder.mChannels;
      }
    }
    return BUFFER_OK;
  }
Example #8
void
AudioTrackEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
        TrackID aID,
        StreamTime aTrackOffset,
        uint32_t aTrackEvents,
        const MediaSegment& aQueuedMedia)
{
    if (mCanceled) {
        return;
    }

    const AudioSegment& audio = static_cast<const AudioSegment&>(aQueuedMedia);

    // Check and initialize parameters for codec encoder.
    if (!mInitialized) {
        mInitCounter++;
        TRACK_LOG(LogLevel::Debug, ("Init the audio encoder %d times", mInitCounter));
        AudioSegment::ChunkIterator iter(const_cast<AudioSegment&>(audio));
        while (!iter.IsEnded()) {
            AudioChunk chunk = *iter;

            // The number of channels is determined by the first non-null chunk, and
            // thus the audio encoder is initialized at this time.
            if (!chunk.IsNull()) {
                nsresult rv = Init(chunk.mChannelData.Length(), aGraph->GraphRate());
                if (NS_FAILED(rv)) {
                    LOG("[AudioTrackEncoder]: Fail to initialize the encoder!");
                    NotifyCancel();
                }
                break;
            }

            iter.Next();
        }

        mNotInitDuration += aQueuedMedia.GetDuration();
        if (!mInitialized &&
                (mNotInitDuration / aGraph->GraphRate() > INIT_FAILED_DURATION) &&
                mInitCounter > 1) {
            LOG("[AudioTrackEncoder]: Initialize failed for 30s.");
            NotifyEndOfStream();
            return;
        }
    }

    // Append and consume this raw segment.
    AppendAudioSegment(audio);


    // The stream has stopped and reached the end of track.
    if (aTrackEvents == MediaStreamListener::TRACK_EVENT_ENDED) {
        LOG("[AudioTrackEncoder]: Receive TRACK_EVENT_ENDED .");
        NotifyEndOfStream();
    }
}
Example #9
void Splicer::pull(AudioChunk &chunk)
{
	Uint32 left = chunk.length(), chans = chunk.format().channels;

	Sint32 *data[PG_MAX_CHANNELS];
	for (Uint32 i = 0; i < chans; ++i) data[i] = chunk.start(i);

	//Query exhausted each loop to refresh the value of "current".
	while (!exhausted())
	{
		//Pull data from next stream
		AudioChunk sub(chunk.audio, output, data, left,
			chunk.frame(), chunk.a(), chunk.b());
		current->pull(sub);

		//Partial advance
		if (current->exhausted())
		{
			Uint32 cut = sub.cutoff();
			for (Uint32 i = 0; i < chans; ++i) data[i] += cut;
			left -= cut;
			current = NULL;
			if (left) continue;
		}
		return;
	}

	//The Splicer is exhausted!
	chunk.cutoff(data[0] - chunk.start(0));
}
Example #10
void
AudioTrackEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
                                            TrackID aID,
                                            TrackRate aTrackRate,
                                            TrackTicks aTrackOffset,
                                            uint32_t aTrackEvents,
                                            const MediaSegment& aQueuedMedia)
{
  if (mCanceled) {
    return;
  }

  const AudioSegment& audio = static_cast<const AudioSegment&>(aQueuedMedia);

  // Check and initialize parameters for codec encoder.
  if (!mInitialized) {
#ifdef PR_LOGGING
    mAudioInitCounter++;
    TRACK_LOG(PR_LOG_DEBUG, ("Init the audio encoder %d times", mAudioInitCounter));
#endif
    AudioSegment::ChunkIterator iter(const_cast<AudioSegment&>(audio));
    while (!iter.IsEnded()) {
      AudioChunk chunk = *iter;

      // The number of channels is determined by the first non-null chunk, and
      // thus the audio encoder is initialized at this time.
      if (!chunk.IsNull()) {
        nsresult rv = Init(chunk.mChannelData.Length(), aTrackRate);
        if (NS_FAILED(rv)) {
          LOG("[AudioTrackEncoder]: Fail to initialize the encoder!");
          NotifyCancel();
        }
        break;
      }

      iter.Next();
    }
  }

  // Append and consume this raw segment.
  AppendAudioSegment(audio);


  // The stream has stopped and reached the end of track.
  if (aTrackEvents == MediaStreamListener::TRACK_EVENT_ENDED) {
    LOG("[AudioTrackEncoder]: Receive TRACK_EVENT_ENDED .");
    NotifyEndOfStream();
  }
}
/**
 * Copies the data in aInput to aOffsetInBlock within aBlock. All samples must
 * be float. Both chunks must have the same number of channels (or else
 * aInput is null). aBlock must have been allocated with AllocateInputBlock.
 */
static void
CopyChunkToBlock(const AudioChunk& aInput, AudioChunk *aBlock, uint32_t aOffsetInBlock)
{
  uint32_t d = aInput.GetDuration();
  for (uint32_t i = 0; i < aBlock->mChannelData.Length(); ++i) {
    float* out = static_cast<float*>(const_cast<void*>(aBlock->mChannelData[i])) +
      aOffsetInBlock;
    if (aInput.IsNull()) {
      PodZero(out, d);
    } else {
      const float* in = static_cast<const float*>(aInput.mChannelData[i]);
      ConvertAudioSamplesWithScale(in, out, d, aInput.mVolume);
    }
  }
}
Example #12
void Bandpass::pull(AudioChunk &chunk,
	const Bandpass_Node &a, const Bandpass_Node &b)
{
	//Pull source data
	source.pull(chunk);

	//Calculate RC multipliers
	float
		al = RCCONV / ((a.low<=0.0f)?40000.0f:a.low),
		ah = RCCONV / ((a.high<=0.0f)?10.0f:a.high),
		bl = RCCONV / ((b.low<=0.0f)?40000.0f:b.low),
		bh = RCCONV / ((b.high<=0.0f)?10.0f:b.high);
	float lpRC = al, hpRC = ah,
		lpM = pow(bl/al, 1.0f / float(chunk.length())),
		hpM = pow(bh/ah, 1.0f / float(chunk.length())),
		lpA, hpA, samp,
		dt = 1.0f / float(chunk.format().rate);

	//Apply effect!
	Uint32 chan = source.format().channels;

	for (Uint32 i = 0; i < chan; ++i)
	{
		Sint32 *pos = chunk.start(i), *end = chunk.end(i);
		float &lpPc = lpP[i], &hpDc = hpD[i];
		while (pos < end)
		{
			//Interpolate settings
			lpA = dt   / (lpRC + dt); lpRC *= lpM;
			hpA = hpRC / (hpRC + dt); hpRC *= hpM;

			//Get samples
			samp = float(*pos);

			//Lowpass
			samp = lpPc + lpA * (samp-lpPc);
			lpPc = samp;

			//Highpass (confusing but correct)
			samp = hpA * (samp+hpDc);
			hpDc = samp - lpPc;

			//Set samples
			*pos = Sint32(samp);
			++pos;
		}
	}
}
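A hypothetical sketch of the per-sample filter pair interpolated in the loop above (OnePole is an illustrative name): dt / (RC + dt) is the lowpass smoothing factor and RC / (RC + dt) the highpass factor, with the same state updates as lpPc and hpD.

struct OnePole {
  float lpPrev = 0.0f;  // previous lowpass output (lpPc above)
  float hpDiff = 0.0f;  // highpass state (hpDc above)

  float process(float sample, float lowRC, float highRC, float dt)
  {
    const float lpA = dt / (lowRC + dt);        // lowpass smoothing factor
    const float hpA = highRC / (highRC + dt);   // highpass factor
    lpPrev += lpA * (sample - lpPrev);          // lowpass stage
    const float out = hpA * (lpPrev + hpDiff);  // highpass stage
    hpDiff = out - lpPrev;
    return out;
  }
};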
Example #13
void Signal::pull(AudioChunk &chunk) const
{
	if (!data) {chunk.silence(); return;}

	if (data->trans) data->trans->pull(chunk);
	else data->stream->pull(chunk);
}
Example #14
/* static */ already_AddRefed<AudioBuffer>
AudioBuffer::Create(nsPIDOMWindowInner* aWindow, float aSampleRate,
                    AudioChunk&& aInitialContents)
{
  AudioChunk initialContents = aInitialContents;
  ErrorResult rv;
  RefPtr<AudioBuffer> buffer =
    new AudioBuffer(aWindow, initialContents.ChannelCount(),
                    initialContents.mDuration, aSampleRate, rv);
  if (rv.Failed()) {
    return nullptr;
  }
  buffer->mSharedChannels = Move(aInitialContents);

  return buffer.forget();
}
/**
 * Copies the data in aInput to aOffsetInBlock within aBlock.
 * aBlock must have been allocated with AllocateInputBlock and have a channel
 * count that's a superset of the channels in aInput.
 */
static void
CopyChunkToBlock(const AudioChunk& aInput, AudioChunk *aBlock,
                 uint32_t aOffsetInBlock)
{
  uint32_t blockChannels = aBlock->ChannelCount();
  nsAutoTArray<const void*,2> channels;
  if (aInput.IsNull()) {
    channels.SetLength(blockChannels);
    PodZero(channels.Elements(), blockChannels);
  } else {
    channels.SetLength(aInput.ChannelCount());
    PodCopy(channels.Elements(), aInput.mChannelData.Elements(), channels.Length());
    if (channels.Length() != blockChannels) {
      // We only need to upmix here because aBlock's channel count has been
      // chosen to be a superset of the channel count of every chunk.
      AudioChannelsUpMix(&channels, blockChannels, nullptr);
    }
  }

  uint32_t duration = aInput.GetDuration();
  for (uint32_t c = 0; c < blockChannels; ++c) {
    float* outputData =
      static_cast<float*>(const_cast<void*>(aBlock->mChannelData[c])) + aOffsetInBlock;
    if (channels[c]) {
      switch (aInput.mBufferFormat) {
      case AUDIO_FORMAT_FLOAT32:
        ConvertAudioSamplesWithScale(
            static_cast<const float*>(channels[c]), outputData, duration,
            aInput.mVolume);
        break;
      case AUDIO_FORMAT_S16:
        ConvertAudioSamplesWithScale(
            static_cast<const int16_t*>(channels[c]), outputData, duration,
            aInput.mVolume);
        break;
      default:
        NS_ERROR("Unhandled format");
      }
    } else {
      PodZero(outputData, duration);
    }
  }
}
Example #16
		virtual void pull(AudioChunk &chunk)
		{
			//This is possible at startup; race conditions are bad.
			if (!back) {chunk.silence(); return;}

			//Shorthand.
			Uint32 frame = back->mikeFrame, length = back->mikeLength;
			const Sint32 *data = back->mikeData;

			//How much information is available?
			if (readFrame != frame) {readFrame = frame; prog = 0;}
			Uint32 want = chunk.length(), get = std::min(length-prog, want);

			//Read what we can from the buffer
			std::memcpy((void*)chunk.start(0), (const void*)(data+prog), 4*get);
			prog += get;

			//Fill your cup too full and it will spill...
			if (get < want)
				std::memset((void*)(chunk.start(0)+get), 0, 4*(want-get));
		}
// The MediaStreamGraph guarantees that this is actually one block, for
// AudioNodeStreams.
void
AudioNodeStream::ProduceOutput(GraphTime aFrom, GraphTime aTo)
{
  StreamBuffer::Track* track = EnsureTrack();

  AudioChunk outputChunk;
  AudioSegment* segment = track->Get<AudioSegment>();

  outputChunk.SetNull(0);

  if (mInCycle) {
    // XXX DelayNode not supported yet so just produce silence
    outputChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
  } else {
    AudioChunk tmpChunk;
    AudioChunk* inputChunk = ObtainInputBlock(&tmpChunk);
    bool finished = false;
    mEngine->ProduceAudioBlock(this, *inputChunk, &outputChunk, &finished);
    if (finished) {
      FinishOutput();
    }
  }

  mLastChunk = outputChunk;
  if (mKind == MediaStreamGraph::EXTERNAL_STREAM) {
    segment->AppendAndConsumeChunk(&outputChunk);
  } else {
    segment->AppendNullData(outputChunk.GetDuration());
  }

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioChunk copyChunk = outputChunk;
    AudioSegment tmpSegment;
    tmpSegment.AppendAndConsumeChunk(&copyChunk);
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID,
                                IdealAudioRate(), segment->GetDuration(), 0,
                                tmpSegment);
  }
}
Example #18
  // graph thread
  AudioChunk GetOutputBuffer()
  {
    MOZ_ASSERT(!NS_IsMainThread());
    AudioChunk buffer;

    {
      MutexAutoLock lock(mOutputQueue.Lock());
      if (mOutputQueue.ReadyToConsume() > 0) {
        if (mDelaySoFar == STREAM_TIME_MAX) {
          mDelaySoFar = 0;
        }
        buffer = mOutputQueue.Consume();
      } else {
        // If we're out of buffers to consume, just output silence
        buffer.SetNull(WEBAUDIO_BLOCK_SIZE);
        if (mDelaySoFar != STREAM_TIME_MAX) {
          // Remember the delay that we just hit
          mDelaySoFar += WEBAUDIO_BLOCK_SIZE;
        }
      }
    }

    return buffer;
  }
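A hypothetical sketch of the consume-or-silence pattern used above (Block and ConsumeOrSilence are illustrative names): pop a buffer when one is ready, otherwise emit a silent block and accumulate the delay introduced by the underrun.

#include <cstddef>
#include <cstdint>
#include <queue>
#include <vector>

struct Block { std::vector<float> samples; };

static Block ConsumeOrSilence(std::queue<Block>& queue, uint64_t& delaySoFar,
                              std::size_t blockSize)
{
  if (!queue.empty()) {
    Block b = std::move(queue.front());
    queue.pop();
    return b;
  }
  delaySoFar += blockSize;                            // remember the delay we just hit
  return Block{std::vector<float>(blockSize, 0.0f)};  // silence
}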
Example #19
void
DelayBuffer::Write(const AudioChunk& aInputChunk)
{
  // We must have a reference to the buffer if there are channels
  MOZ_ASSERT(aInputChunk.IsNull() == !aInputChunk.mChannelData.Length());
#ifdef DEBUG
  MOZ_ASSERT(!mHaveWrittenBlock);
  mHaveWrittenBlock = true;
#endif

  if (!EnsureBuffer()) {
    return;
  }

  if (mCurrentChunk == mLastReadChunk) {
    mLastReadChunk = -1; // invalidate cache
  }
  mChunks[mCurrentChunk] = aInputChunk;
}
Example #20
nsresult
VorbisTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
  if (mEosSetInEncoder) {
    return NS_OK;
  }

  PROFILER_LABEL("VorbisTrackEncoder", "GetEncodedTrack",
    js::ProfileEntry::Category::OTHER);

  nsAutoPtr<AudioSegment> sourceSegment;
  sourceSegment = new AudioSegment();
  {
    // Move all the samples from mRawSegment to sourceSegment. We only hold
    // the monitor in this block.
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);

    // Wait if mEncoder is not initialized, or when there is not enough raw
    // data, unless we have reached the end of stream or been canceled.
    while (!mCanceled && mRawSegment.GetDuration() < GetPacketDuration() &&
           !mEndOfStream) {
      mon.Wait();
    }
    VORBISLOG("GetEncodedTrack passes wait, duration is %lld\n",
      mRawSegment.GetDuration());
    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }

    sourceSegment->AppendFrom(&mRawSegment);
  }

  if (mEndOfStream && (sourceSegment->GetDuration() == 0)
      && !mEosSetInEncoder) {
    mEncodingComplete = true;
    mEosSetInEncoder = true;
    VORBISLOG("[Vorbis] Done encoding.");
    vorbis_analysis_wrote(&mVorbisDsp, 0);
    GetEncodedFrames(aData);

    return NS_OK;
  }

  // Start encoding data.
  AudioSegment::ChunkIterator iter(*sourceSegment);

  AudioDataValue **vorbisBuffer =
    vorbis_analysis_buffer(&mVorbisDsp, (int)sourceSegment->GetDuration());

  int framesCopied = 0;
  AutoTArray<AudioDataValue, 9600> interleavedPcm;
  AutoTArray<AudioDataValue, 9600> nonInterleavedPcm;
  interleavedPcm.SetLength(sourceSegment->GetDuration() * mChannels);
  nonInterleavedPcm.SetLength(sourceSegment->GetDuration() * mChannels);
  while (!iter.IsEnded()) {
    AudioChunk chunk = *iter;
    int frameToCopy = chunk.GetDuration();
    if (!chunk.IsNull()) {
      InterleaveTrackData(chunk, frameToCopy, mChannels,
                          interleavedPcm.Elements() + framesCopied * mChannels);
    } else { // empty data
      memset(interleavedPcm.Elements() + framesCopied * mChannels, 0,
             frameToCopy * mChannels * sizeof(AudioDataValue));
    }
    framesCopied += frameToCopy;
    iter.Next();
  }
  // De-interleave the interleavedPcm.
  DeInterleaveTrackData(interleavedPcm.Elements(), framesCopied, mChannels,
                        nonInterleavedPcm.Elements());
  // Copy the nonInterleavedPcm to vorbis buffer.
  for(uint8_t i = 0; i < mChannels; ++i) {
    memcpy(vorbisBuffer[i], nonInterleavedPcm.Elements() + framesCopied * i,
           framesCopied * sizeof(AudioDataValue));
  }

  // Now vorbisBuffer contains all the data, non-interleaved.
  // Tell the library how much we actually submitted.
  vorbis_analysis_wrote(&mVorbisDsp, framesCopied);
  VORBISLOG("vorbis_analysis_wrote framesCopied %d\n", framesCopied);
  GetEncodedFrames(aData);

  return NS_OK;
}
Example #21
void
PannerNodeEngine::EqualPowerPanningFunction(const AudioChunk& aInput,
                                            AudioChunk* aOutput)
{
  if (aInput.IsNull()) {
    *aOutput = aInput;
    return;
  }

  float azimuth, elevation, gainL, gainR, normalizedAzimuth, distanceGain, coneGain;
  int inputChannels = aInput.mChannelData.Length();

  // If the listener and the panner are in the same spot, and no cone gain is
  // specified, this node is a no-op.
  if (mListenerPosition == mPosition &&
      mConeInnerAngle == 360 &&
      mConeOuterAngle == 360) {
    *aOutput = aInput;
    return;
  }

  // The output of this node is always stereo, no matter what the inputs are.
  AllocateAudioBlock(2, aOutput);

  ComputeAzimuthAndElevation(azimuth, elevation);
  coneGain = ComputeConeGain();

  // The following algorithm is described in the spec.
  // Clamp azimuth in the [-90, 90] range.
  azimuth = min(180.f, max(-180.f, azimuth));

  // Wrap around
  if (azimuth < -90.f) {
    azimuth = -180.f - azimuth;
  } else if (azimuth > 90) {
    azimuth = 180.f - azimuth;
  }

  // Normalize the value in the [0, 1] range.
  if (inputChannels == 1) {
    normalizedAzimuth = (azimuth + 90.f) / 180.f;
  } else {
    if (azimuth <= 0) {
      normalizedAzimuth = (azimuth + 90.f) / 90.f;
    } else {
      normalizedAzimuth = azimuth / 90.f;
    }
  }

  distanceGain = ComputeDistanceGain();

  // Actually compute the left and right gain.
  gainL = cos(0.5 * M_PI * normalizedAzimuth) * aInput.mVolume;
  gainR = sin(0.5 * M_PI * normalizedAzimuth) * aInput.mVolume;

  // Compute the output.
  if (inputChannels == 1) {
    GainMonoToStereo(aInput, aOutput, gainL, gainR);
  } else {
    GainStereoToStereo(aInput, aOutput, gainL, gainR, azimuth);
  }

  DistanceAndConeGain(aOutput, distanceGain * coneGain);
}
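A hypothetical standalone sketch of the equal-power gain computation used above (EqualPowerGains is an illustrative name): a normalized azimuth in [0, 1] maps to left/right gains such that gainL² + gainR² equals volume² at every pan position, keeping perceived loudness constant.

#include <cmath>

static void EqualPowerGains(float normalizedAzimuth, float volume,
                            float* gainL, float* gainR)
{
  const float kPi = 3.14159265f;
  *gainL = std::cos(0.5f * kPi * normalizedAzimuth) * volume;
  *gainR = std::sin(0.5f * kPi * normalizedAzimuth) * volume;
}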
Example #22
void
AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsAudioParamStream()) {
      continue;
    }

    // It is possible for mLastChunks to be empty here, because `a` might be an
    // AudioNodeStream that has not been scheduled yet, because it is further
    // down the graph _but_ is connected back to this node. Because we enforce
    // the presence of at least one DelayNode, with at least one block of delay,
    // and because the output of a DelayNode when it has been fed less than
    // `delayTime` amount of audio is silence, we can simply continue here,
    // because this input would not influence the output of this node. Next
    // iteration, a->mLastChunks.IsEmpty() will be false, and everything will
    // work as usual.
    if (a->mLastChunks.IsEmpty()) {
      continue;
    }

    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }

  outputChannelCount = ComputedNumberOfChannels(outputChannelCount);

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  if (inputChunkCount == 1 &&
      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }

  if (outputChannelCount == 0) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
  }
}
AudioChunk*
AudioNodeStream::ObtainInputBlock(AudioChunk* aTmpChunk)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 0;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsFinishedOnGraphThread()) {
      continue;
    }
    AudioChunk* chunk = &a->mLastChunk;
    // XXX when we implement DelayNode, this will no longer be true and we'll
    // need to treat a null chunk (when the DelayNode hasn't had a chance
    // to produce data yet) as silence here.
    MOZ_ASSERT(chunk);
    if (chunk->IsNull()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0) {
    aTmpChunk->SetNull(WEBAUDIO_BLOCK_SIZE);
    return aTmpChunk;
  }

  if (inputChunkCount == 1) {
    return inputChunks[0];
  }

  AllocateAudioBlock(outputChannelCount, aTmpChunk);

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AudioChunk* chunk = inputChunks[i];
    nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
    channels.AppendElements(chunk->mChannelData);
    if (channels.Length() < outputChannelCount) {
      AudioChannelsUpMix(&channels, outputChannelCount, nullptr);
      NS_ASSERTION(outputChannelCount == channels.Length(),
                   "We called GetAudioChannelsSuperset to avoid this");
    }

    for (uint32_t c = 0; c < channels.Length(); ++c) {
      const float* inputData = static_cast<const float*>(channels[c]);
      float* outputData = static_cast<float*>(const_cast<void*>(aTmpChunk->mChannelData[c]));
      if (inputData) {
        if (i == 0) {
          AudioBlockCopyChannelWithScale(inputData, chunk->mVolume, outputData);
        } else {
          AudioBlockAddChannelWithScale(inputData, chunk->mVolume, outputData);
        }
      } else {
        if (i == 0) {
          memset(outputData, 0, WEBAUDIO_BLOCK_SIZE*sizeof(float));
        }
      }
    }
  }

  return aTmpChunk;
}
void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // According to spec, number of outputs is always 1.
  MOZ_ASSERT(mLastChunks.Length() == 1);

  // GC stuff can result in our input stream being destroyed before this stream.
  // Handle that.
  if (!IsEnabled() || mInputs.IsEmpty() || mPassThrough) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    AdvanceOutputSegment();
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  nsAutoTArray<AudioSegment,1> audioSegments;
  uint32_t inputChannels = 0;
  for (StreamBuffer::TrackIter tracks(source->mBuffer, MediaSegment::AUDIO);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamBuffer::Track& inputTrack = *tracks;
    const AudioSegment& inputSegment =
        *static_cast<AudioSegment*>(inputTrack.GetSegment());
    if (inputSegment.IsNull()) {
      continue;
    }

    AudioSegment& segment = *audioSegments.AppendElement();
    GraphTime next;
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval = mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd)
        break;
      next = interval.mEnd;

      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      StreamTime ticks = outputEnd - outputStart;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        StreamTime inputStart =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mStart));
        StreamTime inputEnd =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mEnd));

        segment.AppendSlice(inputSegment, inputStart, inputEnd);
        // Pad if we're looking past the end of the track
        segment.AppendNullData(ticks - (inputEnd - inputStart));
      }
    }

    for (AudioSegment::ChunkIterator iter(segment); !iter.IsEnded(); iter.Next()) {
      inputChannels = GetAudioChannelsSuperset(inputChannels, iter->ChannelCount());
    }
  }

  uint32_t accumulateIndex = 0;
  if (inputChannels) {
    nsAutoTArray<float,GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
    for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
      AudioChunk tmpChunk;
      ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk, inputChannels);
      if (!tmpChunk.IsNull()) {
        if (accumulateIndex == 0) {
          AllocateAudioBlock(inputChannels, &mLastChunks[0]);
        }
        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0], &downmixBuffer);
        accumulateIndex++;
      }
    }
  }
  if (accumulateIndex == 0) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }

  // Using AudioNodeStream's AdvanceOutputSegment to push the media stream graph along with null data.
  AdvanceOutputSegment();
}
void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // According to spec, number of outputs is always 1.
  mLastChunks.SetLength(1);

  // GC stuff can result in our input stream being destroyed before this stream.
  // Handle that.
  if (mInputs.IsEmpty()) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    AdvanceOutputSegment();
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  nsAutoTArray<AudioSegment,1> audioSegments;
  nsAutoTArray<bool,1> trackMapEntriesUsed;
  uint32_t inputChannels = 0;
  for (StreamBuffer::TrackIter tracks(source->mBuffer, MediaSegment::AUDIO);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamBuffer::Track& inputTrack = *tracks;
    // Create a TrackMapEntry if necessary.
    size_t trackMapIndex = GetTrackMapEntry(inputTrack, aFrom);
    // Maybe there's nothing in this track yet. If so, ignore it. (While the
    // track is only playing silence, we may not be able to determine the
    // correct number of channels to start resampling.)
    if (trackMapIndex == nsTArray<TrackMapEntry>::NoIndex) {
      continue;
    }

    while (trackMapEntriesUsed.Length() <= trackMapIndex) {
      trackMapEntriesUsed.AppendElement(false);
    }
    trackMapEntriesUsed[trackMapIndex] = true;

    TrackMapEntry* trackMap = &mTrackMap[trackMapIndex];
    AudioSegment segment;
    GraphTime next;
    TrackRate inputTrackRate = inputTrack.GetRate();
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval = mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd)
        break;
      next = interval.mEnd;

      // Ticks >= startTicks and < endTicks are in the interval
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      TrackTicks startTicks = trackMap->mSamplesPassedToResampler + segment.GetDuration();
      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      NS_ASSERTION(startTicks == TimeToTicksRoundUp(inputTrackRate, outputStart),
                   "Samples missing");
      TrackTicks endTicks = TimeToTicksRoundUp(inputTrackRate, outputEnd);
      TrackTicks ticks = endTicks - startTicks;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        // See comments in TrackUnionStream::CopyTrackData
        StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
        StreamTime inputEnd = source->GraphTimeToStreamTime(interval.mEnd);
        TrackTicks inputTrackEndPoint =
            inputTrack.IsEnded() ? inputTrack.GetEnd() : TRACK_TICKS_MAX;

        if (trackMap->mEndOfLastInputIntervalInInputStream != inputStart ||
            trackMap->mEndOfLastInputIntervalInOutputStream != outputStart) {
          // Start of a new series of intervals where neither stream is blocked.
          trackMap->mEndOfConsumedInputTicks = TimeToTicksRoundDown(inputTrackRate, inputStart) - 1;
        }
        TrackTicks inputStartTicks = trackMap->mEndOfConsumedInputTicks;
        TrackTicks inputEndTicks = inputStartTicks + ticks;
        trackMap->mEndOfConsumedInputTicks = inputEndTicks;
        trackMap->mEndOfLastInputIntervalInInputStream = inputEnd;
        trackMap->mEndOfLastInputIntervalInOutputStream = outputEnd;

        if (inputStartTicks < 0) {
          // Data before the start of the track is just null.
          segment.AppendNullData(-inputStartTicks);
          inputStartTicks = 0;
        }
        if (inputEndTicks > inputStartTicks) {
          segment.AppendSlice(*inputTrack.GetSegment(),
                              std::min(inputTrackEndPoint, inputStartTicks),
                              std::min(inputTrackEndPoint, inputEndTicks));
        }
        // Pad if we're looking past the end of the track
        segment.AppendNullData(ticks - segment.GetDuration());
      }
    }

    trackMap->mSamplesPassedToResampler += segment.GetDuration();
    trackMap->ResampleInputData(&segment);

    if (trackMap->mResampledData.GetDuration() < mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE) {
      // We don't have enough data. Delay it.
      trackMap->mResampledData.InsertNullDataAtStart(
        mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE - trackMap->mResampledData.GetDuration());
    }
    audioSegments.AppendElement()->AppendSlice(trackMap->mResampledData,
      mCurrentOutputPosition, mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE);
    trackMap->mResampledData.ForgetUpTo(mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE);
    inputChannels = GetAudioChannelsSuperset(inputChannels, trackMap->mResamplerChannelCount);
  }

  for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
    if (i >= int32_t(trackMapEntriesUsed.Length()) || !trackMapEntriesUsed[i]) {
      mTrackMap.RemoveElementAt(i);
    }
  }

  uint32_t accumulateIndex = 0;
  if (inputChannels) {
    nsAutoTArray<float,GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
    for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
      AudioChunk tmpChunk;
      ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk);
      if (!tmpChunk.IsNull()) {
        if (accumulateIndex == 0) {
          AllocateAudioBlock(inputChannels, &mLastChunks[0]);
        }
        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0], &downmixBuffer);
        accumulateIndex++;
      }
    }
  }
  if (accumulateIndex == 0) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }
  mCurrentOutputPosition += WEBAUDIO_BLOCK_SIZE;

  // Using AudioNodeStream's AdvanceOutputSegment to push the media stream graph along with null data.
  AdvanceOutputSegment();
}
Example #26
void
AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsFinishedOnGraphThread() ||
        a->IsAudioParamStream()) {
      continue;
    }
    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }

  switch (mChannelCountMode) {
  case ChannelCountMode::Explicit:
    // Disregard the output channel count that we've calculated, and just use
    // mNumberOfInputChannels.
    outputChannelCount = mNumberOfInputChannels;
    break;
  case ChannelCountMode::Clamped_max:
    // Clamp the computed output channel count to mNumberOfInputChannels.
    outputChannelCount = std::min(outputChannelCount, mNumberOfInputChannels);
    break;
  case ChannelCountMode::Max:
    // Nothing to do here, just shut up the compiler warning.
    break;
  }

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  if (inputChunkCount == 1 &&
      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }

  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
  float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AudioChunk* chunk = inputChunks[i];
    nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
    channels.AppendElements(chunk->mChannelData);
    if (channels.Length() < outputChannelCount) {
      if (mChannelInterpretation == ChannelInterpretation::Speakers) {
        AudioChannelsUpMix(&channels, outputChannelCount, nullptr);
        NS_ASSERTION(outputChannelCount == channels.Length(),
                     "We called GetAudioChannelsSuperset to avoid this");
      } else {
        // Fill up the remaining channels by zeros
        for (uint32_t j = channels.Length(); j < outputChannelCount; ++j) {
          channels.AppendElement(silenceChannel);
        }
      }
    } else if (channels.Length() > outputChannelCount) {
      if (mChannelInterpretation == ChannelInterpretation::Speakers) {
        nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
        outputChannels.SetLength(outputChannelCount);
        downmixBuffer.SetLength(outputChannelCount * WEBAUDIO_BLOCK_SIZE);
        for (uint32_t j = 0; j < outputChannelCount; ++j) {
          outputChannels[j] = &downmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
        }

        AudioChannelsDownMix(channels, outputChannels.Elements(),
                             outputChannelCount, WEBAUDIO_BLOCK_SIZE);

        channels.SetLength(outputChannelCount);
        for (uint32_t j = 0; j < channels.Length(); ++j) {
          channels[j] = outputChannels[j];
        }
      } else {
        // Drop the remaining channels
        channels.RemoveElementsAt(outputChannelCount,
                                  channels.Length() - outputChannelCount);
      }
    }

    for (uint32_t c = 0; c < channels.Length(); ++c) {
      const float* inputData = static_cast<const float*>(channels[c]);
      float* outputData = static_cast<float*>(const_cast<void*>(aTmpChunk.mChannelData[c]));
      if (inputData) {
        if (i == 0) {
          AudioBlockCopyChannelWithScale(inputData, chunk->mVolume, outputData);
        } else {
          AudioBlockAddChannelWithScale(inputData, chunk->mVolume, outputData);
        }
      } else {
        if (i == 0) {
          memset(outputData, 0, WEBAUDIO_BLOCK_SIZE*sizeof(float));
        }
      }
    }
  }
}
Example #27
nsresult
OpusTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
  PROFILER_LABEL("OpusTrackEncoder", "GetEncodedTrack",
    js::ProfileEntry::Category::OTHER);
  {
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);
    // Wait until initialized or cancelled.
    while (!mCanceled && !mInitialized) {
      mReentrantMonitor.Wait();
    }
    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }
  }

  // The calculation below depends on mInitialized being true.
  MOZ_ASSERT(mInitialized);

  // Re-sampled frames left over from last time that didn't fit into an Opus packet duration.
  const int framesLeft = mResampledLeftover.Length() / mChannels;
  // When framesLeft is 0, (GetPacketDuration() - framesLeft) is a multiple
  // of kOpusSamplingRate. There is no precision loss in the integer division
  // when computing framesToFetch. If framesLeft > 0, we need to add 1 to
  // framesToFetch to ensure there will be at least n frames after re-sampling.
  const int frameRoundUp = framesLeft ? 1 : 0;

  MOZ_ASSERT(GetPacketDuration() >= framesLeft);
  // Try to fetch m frames such that there will be n frames
  // where (n + framesLeft) >= GetPacketDuration() after re-sampling.
  const int framesToFetch = !mResampler ? GetPacketDuration()
    : (GetPacketDuration() - framesLeft) * mSamplingRate / kOpusSamplingRate
      + frameRoundUp;
  {
    // Move all the samples from mRawSegment to mSourceSegment. We only hold
    // the monitor in this block.
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);

    // Wait until enough raw data, end of stream or cancelled.
    while (!mCanceled && mRawSegment.GetDuration() +
        mSourceSegment.GetDuration() < framesToFetch &&
        !mEndOfStream) {
      mReentrantMonitor.Wait();
    }

    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }

    mSourceSegment.AppendFrom(&mRawSegment);

    // Pad |mLookahead| samples to the end of the source stream to prevent loss
    // of original data; the PCM duration will be calculated at 48 kHz later.
    if (mEndOfStream && !mEosSetInEncoder) {
      mEosSetInEncoder = true;
      mSourceSegment.AppendNullData(mLookahead);
    }
  }

  // Start encoding data.
  nsAutoTArray<AudioDataValue, 9600> pcm;
  pcm.SetLength(GetPacketDuration() * mChannels);
  AudioSegment::ChunkIterator iter(mSourceSegment);
  int frameCopied = 0;

  while (!iter.IsEnded() && frameCopied < framesToFetch) {
    AudioChunk chunk = *iter;

    // Clip the chunk to the required frame size.
    int frameToCopy = chunk.GetDuration();
    if (frameCopied + frameToCopy > framesToFetch) {
      frameToCopy = framesToFetch - frameCopied;
    }

    if (!chunk.IsNull()) {
      // Append the interleaved data to the end of pcm buffer.
      AudioTrackEncoder::InterleaveTrackData(chunk, frameToCopy, mChannels,
        pcm.Elements() + frameCopied * mChannels);
    } else {
      memset(pcm.Elements() + frameCopied * mChannels, 0,
             frameToCopy * mChannels * sizeof(AudioDataValue));
    }

    frameCopied += frameToCopy;
    iter.Next();
  }

  RefPtr<EncodedFrame> audiodata = new EncodedFrame();
  audiodata->SetFrameType(EncodedFrame::OPUS_AUDIO_FRAME);
  int framesInPCM = frameCopied;
  if (mResampler) {
    nsAutoTArray<AudioDataValue, 9600> resamplingDest;
    // We want to consume all the input data, so we slightly oversize the
    // resampled data buffer so we can fit the output data in. We cannot really
    // predict the output frame count at each call.
    uint32_t outframes = frameCopied * kOpusSamplingRate / mSamplingRate + 1;
    uint32_t inframes = frameCopied;

    resamplingDest.SetLength(outframes * mChannels);

#if MOZ_SAMPLE_TYPE_S16
    short* in = reinterpret_cast<short*>(pcm.Elements());
    short* out = reinterpret_cast<short*>(resamplingDest.Elements());
    speex_resampler_process_interleaved_int(mResampler, in, &inframes,
                                                        out, &outframes);
#else
    float* in = reinterpret_cast<float*>(pcm.Elements());
    float* out = reinterpret_cast<float*>(resamplingDest.Elements());
    speex_resampler_process_interleaved_float(mResampler, in, &inframes,
                                                          out, &outframes);
#endif

    MOZ_ASSERT(pcm.Length() >= mResampledLeftover.Length());
    PodCopy(pcm.Elements(), mResampledLeftover.Elements(),
        mResampledLeftover.Length());

    uint32_t outframesToCopy = std::min(outframes,
        static_cast<uint32_t>(GetPacketDuration() - framesLeft));

    MOZ_ASSERT(pcm.Length() - mResampledLeftover.Length() >=
        outframesToCopy * mChannels);
    PodCopy(pcm.Elements() + mResampledLeftover.Length(),
        resamplingDest.Elements(), outframesToCopy * mChannels);
    int frameLeftover = outframes - outframesToCopy;
    mResampledLeftover.SetLength(frameLeftover * mChannels);
    PodCopy(mResampledLeftover.Elements(),
        resamplingDest.Elements() + outframesToCopy * mChannels,
        mResampledLeftover.Length());
    // This is always at 48000Hz.
    framesInPCM = framesLeft + outframesToCopy;
    audiodata->SetDuration(framesInPCM);
  } else {
    // The ogg time stamping and pre-skip is always timed at 48000.
    audiodata->SetDuration(frameCopied * (kOpusSamplingRate / mSamplingRate));
  }

  // Remove the raw data which has been pulled into the pcm buffer.
  // The value of frameCopied should be equal to (or smaller than, if EOS)
  // GetPacketDuration().
  mSourceSegment.RemoveLeading(frameCopied);

  // We have reached the end of the input stream and all queued data has been
  // pulled for encoding.
  if (mSourceSegment.GetDuration() == 0 && mEndOfStream) {
    mEncodingComplete = true;
    LOG("[Opus] Done encoding.");
  }

  MOZ_ASSERT(mEndOfStream || framesInPCM == GetPacketDuration());

  // Append null data to the pcm buffer if the leftover data is not enough for
  // the Opus encoder.
  if (framesInPCM < GetPacketDuration() && mEndOfStream) {
    PodZero(pcm.Elements() + framesInPCM * mChannels,
        (GetPacketDuration() - framesInPCM) * mChannels);
  }
  nsTArray<uint8_t> frameData;
  // Encode the data with Opus Encoder.
  frameData.SetLength(MAX_DATA_BYTES);
  // result is returned as opus error code if it is negative.
  int result = 0;
#ifdef MOZ_SAMPLE_TYPE_S16
  const opus_int16* pcmBuf = static_cast<opus_int16*>(pcm.Elements());
  result = opus_encode(mEncoder, pcmBuf, GetPacketDuration(),
                       frameData.Elements(), MAX_DATA_BYTES);
#else
  const float* pcmBuf = static_cast<float*>(pcm.Elements());
  result = opus_encode_float(mEncoder, pcmBuf, GetPacketDuration(),
                             frameData.Elements(), MAX_DATA_BYTES);
#endif
  frameData.SetLength(result >= 0 ? result : 0);

  if (result < 0) {
    LOG("[Opus] Fail to encode data! Result: %s.", opus_strerror(result));
  }
  if (mEncodingComplete) {
    if (mResampler) {
      speex_resampler_destroy(mResampler);
      mResampler = nullptr;
    }
    mResampledLeftover.SetLength(0);
  }

  audiodata->SwapInFrameData(frameData);
  aData.AppendEncodedFrame(audiodata);
  return result >= 0 ? NS_OK : NS_ERROR_FAILURE;
}
Example #28
void Oscillator::pull(AudioChunk &chunk, const State &a, const State &b)
{
	//Phase goes from -1 to 1.  Weird?  Maybe.
	while (phase >= 1.0f) phase -= 2.0f;
	while (phase < -1.0f) phase += 2.0f;

	Sint32 samp;
	float v, x, sq;

	float step = (b.freq / output.rate) * 2.0f,
		amp = b.amp * 16777215.0f,
		ph = phase;

	Sint32 *i = chunk.start(0), *e = chunk.end(0);
	switch (type)
	{
	case SINE:
		while (i != e)
		{
			//Simple sine wave
			samp = amp*sin(PI * ph);
			ph += step;
			*i = samp; ++i;
			ph -= 2.0f*int(ph*.5f); //Restrict to [-1, 1]
		}
		break;

	case SQUARE:
		while (i != e)
		{
			samp = amp * ((ph>=0.0f) ? 1.0f : -1.0f);
			ph += step;
			*i = samp; ++i;
			ph -= 2.0f*int(ph*.5f); //Restrict to [-1, 1]
		}
		break;

	case TRIANGLE:
		while (i != e)
		{
			samp = amp * (2.0f*std::abs(ph) - 1.0f);
			ph += step;
			*i = samp; ++i;
			ph -= 2.0f*int(ph*.5f); //Restrict to [-1, 1]
		}
		break;

	case SAWTOOTH:
		while (i != e)
		{
			samp = amp * ph;
			ph += step;
			*i = samp; ++i;
			ph -= 2.0f*int(ph*.5f); //Restrict to [-1, 1]
		}
		break;

	case KLAXON:
	default:
	{
		const float PISQ = PI*PI;
		while (i != e)
		{
			//Sine wave approximation gone wrong
			sq = PISQ*ph*ph; x = PI*ph*amp;
			v = x * 1.02394347; //Makes the endpoints line up
			x *= sq; v -= x/6.0f;
			x *= sq; v += x/120.0f;
			x *= sq; v -= x/5040.0f;
			samp = x;
			ph += step;
			*i = samp; ++i;

			ph -= 2.0f*int(ph*.5f); //Restrict to [-1, 1]
		}
	}
		break;
	}

	phase = ph;

	//Copy signal into all other channels
	for (Uint32 c = chunk.channels()-1; c > 0; --c)
	{
		std::memcpy(chunk.start(c), chunk.start(0), 4*chunk.length());
	}
}
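A hypothetical sketch of the phase-wrapping trick applied after every sample above (WrapPhase is an illustrative name): subtracting 2 * int(ph * 0.5) strips whole periods of length 2 (int() truncates toward zero), so the accumulated phase stays bounded in (-2, 2) however many steps are taken.

static inline float WrapPhase(float ph)
{
  // Remove whole periods of length 2 so ph never grows without bound.
  return ph - 2.0f * static_cast<int>(ph * 0.5f);
}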