Code Example #1
bool
DirectShowReader::DecodeAudioData()
{
    MOZ_ASSERT(mDecoder->OnDecodeThread(), "Should be on decode thread.");
    HRESULT hr;

    SampleSink* sink = mAudioSinkFilter->GetSampleSink();
    if (sink->AtEOS()) {
        // End of stream.
        return Finish(S_OK);
    }

    // Get the next chunk of audio samples. This blocks until the sample
    // arrives or an error occurs (for example, if the stream is shut down).
    RefPtr<IMediaSample> sample;
    hr = sink->Extract(sample);
    if (FAILED(hr) || hr == S_FALSE) {
        return Finish(hr);
    }

    int64_t start = 0, end = 0;
    sample->GetMediaTime(&start, &end);
    LOG("DirectShowReader::DecodeAudioData [%4.2lf-%4.2lf]",
        RefTimeToSeconds(start),
        RefTimeToSeconds(end));

    LONG length = sample->GetActualDataLength();
    LONG numSamples = length / mBytesPerSample;
    LONG numFrames = numSamples / mNumChannels;

    BYTE* data = nullptr;
    hr = sample->GetPointer(&data);
    NS_ENSURE_TRUE(SUCCEEDED(hr), Finish(hr));

    // Convert the raw PCM (unsigned 8-bit or signed 16-bit) into the
    // engine's AudioDataValue format.
    nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[numSamples]);
    AudioDataValue* dst = buffer.get();
    if (mBytesPerSample == 1) {
        uint8_t* src = reinterpret_cast<uint8_t*>(data);
        for (int32_t i = 0; i < numSamples; ++i) {
            dst[i] = UnsignedByteToAudioSample(src[i]);
        }
    } else if (mBytesPerSample == 2) {
        int16_t* src = reinterpret_cast<int16_t*>(data);
        for (int32_t i = 0; i < numSamples; ++i) {
            dst[i] = AudioSampleToFloat(src[i]);
        }
    }

    mAudioQueue.Push(new AudioData(mDecoder->GetResource()->Tell(),
                                   RefTimeToUsecs(start),
                                   RefTimeToUsecs(end - start),
                                   numFrames,
                                   buffer.forget(),
                                   mNumChannels));
    return true;
}
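
Example #1 relies on two conversion helpers that are not shown above. Below is a minimal sketch of what they typically look like, assuming AudioDataValue is a 32-bit float; the exact definitions in the Mozilla tree may differ.

#include <cstdint>

typedef float AudioDataValue; // assumption: float sample format

// Unsigned 8-bit PCM is centered at 128; map [0, 255] onto [-1.0, 1.0].
inline AudioDataValue
UnsignedByteToAudioSample(uint8_t aValue)
{
    return aValue * (2.0f / 255.0f) - 1.0f;
}

// Signed 16-bit PCM; divide by 32768 to land in roughly [-1.0, 1.0].
inline AudioDataValue
AudioSampleToFloat(int16_t aValue)
{
    return aValue / 32768.0f;
}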
Code Example #2
bool
DirectShowReader::DecodeAudioData()
{
  MOZ_ASSERT(OnTaskQueue());
  HRESULT hr;

  SampleSink* sink = mAudioSinkFilter->GetSampleSink();
  if (sink->AtEOS()) {
    // End of stream.
    return Finish(S_OK);
  }

  // Get the next chunk of audio samples. This blocks until the sample
  // arrives or an error occurs (for example, if the stream is shut down).
  RefPtr<IMediaSample> sample;
  hr = sink->Extract(sample);
  if (FAILED(hr) || hr == S_FALSE) {
    return Finish(hr);
  }

  int64_t start = 0, end = 0;
  sample->GetMediaTime(&start, &end);
  LOG("DirectShowReader::DecodeAudioData [%4.2lf-%4.2lf]",
      RefTimeToSeconds(start),
      RefTimeToSeconds(end));

  LONG length = sample->GetActualDataLength();
  LONG numSamples = length / mBytesPerSample;
  LONG numFrames = numSamples / mNumChannels;

  BYTE* data = nullptr;
  hr = sample->GetPointer(&data);
  NS_ENSURE_TRUE(SUCCEEDED(hr), Finish(hr));

  // Hand the raw bytes to the audio compactor; the DirectShowCopy functor
  // converts them to AudioDataValue as the compactor pulls samples.
  mAudioCompactor.Push(mDecoder->GetResource()->Tell(),
                       RefTimeToUsecs(start),
                       mInfo.mAudio.mRate,
                       numFrames,
                       mNumChannels,
                       DirectShowCopy(reinterpret_cast<uint8_t *>(data),
                                      mBytesPerSample,
                                      numSamples,
                                      mNumChannels));
  return true;
}
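
The key difference from Example #1 is that the per-sample conversion has moved out of the reader and into a copy functor that mAudioCompactor pulls from on demand, avoiding the intermediate buffer. Below is a sketch of what such a functor could look like, reusing the conversion helpers sketched after Example #1 and assuming the compactor calls operator() with a destination buffer and a maximum sample count, expecting the number of whole frames written back; the actual AudioCompactor contract may differ.

#include <algorithm>
#include <cstdint>

// Hypothetical copy functor in the spirit of DirectShowCopy: converts raw
// u8/s16 PCM into AudioDataValue as the compactor asks for samples.
class DirectShowCopy
{
public:
  DirectShowCopy(uint8_t* aSource, uint32_t aBytesPerSample,
                 uint32_t aSamples, uint32_t aChannels)
    : mSource(aSource)
    , mBytesPerSample(aBytesPerSample)
    , mSamples(aSamples)
    , mChannels(aChannels)
    , mNextSample(0)
  {}

  // Fill aBuffer with up to aSamples converted samples; return the number
  // of whole frames produced so the caller can track progress.
  uint32_t operator()(AudioDataValue* aBuffer, uint32_t aSamples)
  {
    uint32_t maxSamples = std::min(aSamples, mSamples - mNextSample);
    uint32_t frames = maxSamples / mChannels;
    const uint8_t* src = mSource + mNextSample * mBytesPerSample;
    if (mBytesPerSample == 1) {
      for (uint32_t i = 0; i < maxSamples; ++i) {
        aBuffer[i] = UnsignedByteToAudioSample(src[i]);
      }
    } else if (mBytesPerSample == 2) {
      const int16_t* src16 = reinterpret_cast<const int16_t*>(src);
      for (uint32_t i = 0; i < maxSamples; ++i) {
        aBuffer[i] = AudioSampleToFloat(src16[i]);
      }
    }
    mNextSample += maxSamples;
    return frames;
  }

private:
  uint8_t* mSource;
  uint32_t mBytesPerSample;
  uint32_t mSamples;    // total samples available in mSource
  uint32_t mChannels;
  uint32_t mNextSample; // next sample index to convert
};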
Code Example #3
void DSPEngine::handleMessages()
{
	Message* message;
	// Drain the queue; accept() returns NULL once no message is pending.
	while((message = m_messageQueue.accept()) != NULL) {
		qDebug("Message: %s", message->getIdentifier());

		if(DSPPing::match(message)) {
			message->completed(m_state);
		} else if(DSPExit::match(message)) {
			gotoIdle();
			m_state = StNotStarted;
			exit();
			message->completed(m_state);
		} else if(DSPAcquisitionStart::match(message)) {
			m_state = gotoIdle();
			if(m_state == StIdle)
				m_state = gotoRunning();
			message->completed(m_state);
		} else if(DSPAcquisitionStop::match(message)) {
			m_state = gotoIdle();
			message->completed(m_state);
		} else if(DSPGetDeviceDescription::match(message)) {
			((DSPGetDeviceDescription*)message)->setDeviceDescription(m_deviceDescription);
			message->completed();
		} else if(DSPGetErrorMessage::match(message)) {
			((DSPGetErrorMessage*)message)->setErrorMessage(m_errorMessage);
			message->completed();
		} else if(DSPSetSource::match(message)) {
			handleSetSource(((DSPSetSource*)message)->getSampleSource());
			message->completed();
		} else if(DSPAddSink::match(message)) {
			SampleSink* sink = ((DSPAddSink*)message)->getSampleSink();
			if(m_state == StRunning) {
				DSPSignalNotification* signal = DSPSignalNotification::create(m_sampleRate, 0, m_tunerFrequency);
				signal->submit(&m_messageQueue, sink);
				sink->start();
			}
			m_sampleSinks.push_back(sink);
			message->completed();
		} else if(DSPRemoveSink::match(message)) {
			SampleSink* sink = ((DSPRemoveSink*)message)->getSampleSink();
			if(m_state == StRunning)
				sink->stop();
			m_sampleSinks.remove(sink);
			message->completed();
		} else if(DSPAddAudioSource::match(message)) {
			m_audioOutput.addFifo(((DSPAddAudioSource*)message)->getAudioFifo());
			message->completed();
		} else if(DSPRemoveAudioSource::match(message)) {
			m_audioOutput.removeFifo(((DSPRemoveAudioSource*)message)->getAudioFifo());
			message->completed();
		} else if(DSPConfigureCorrection::match(message)) {
			DSPConfigureCorrection* conf = (DSPConfigureCorrection*)message;
			// Reset the running correction estimates only when the
			// corresponding setting actually changes. (Assigning
			// m_iqImbalanceCorrection before the comparison below, as the
			// original code did, made that check always false.)
			if(m_dcOffsetCorrection != conf->getDCOffsetCorrection()) {
				m_dcOffsetCorrection = conf->getDCOffsetCorrection();
				m_iOffset = 0;
				m_qOffset = 0;
			}
			if(m_iqImbalanceCorrection != conf->getIQImbalanceCorrection()) {
				m_iqImbalanceCorrection = conf->getIQImbalanceCorrection();
				m_iRange = 1 << 16;
				m_qRange = 1 << 16;
				m_imbalance = 65536;
			}
			message->completed();
		} else {
			if(!distributeMessage(message))
				message->completed();
		}
	}
}
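
Every branch above uses a static match() to recover the concrete message type before the C-style downcast. One common way to implement that discriminator is sketched below with a per-class identifier string; the real Message base class in this codebase may use a different mechanism, such as comparing registered type objects.

#include <cstring>

// Minimal sketch of the match()/getIdentifier() pattern. Each message class
// exposes a unique identifier; match() checks it before the dispatch loop
// performs its cast.
class Message {
public:
	virtual ~Message() {}
	// Also feeds the qDebug() trace at the top of handleMessages().
	virtual const char* getIdentifier() const = 0;
};

class DSPPing : public Message {
public:
	static const char* staticIdentifier() { return "DSPPing"; }
	const char* getIdentifier() const { return staticIdentifier(); }

	// True iff the queued message really is a DSPPing, making the
	// subsequent (DSPPing*)message cast in the dispatch loop safe.
	static bool match(const Message* message)
	{
		return std::strcmp(message->getIdentifier(), staticIdentifier()) == 0;
	}
};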