Code example #1
PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& es)
{
    ASSERT(mediaStream);
    if (!mediaStream) {
        es.throwDOMException(InvalidStateError);
        return 0;
    }

    ASSERT(isMainThread());
    lazyInitialize();

    AudioSourceProvider* provider = 0;

    MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();

    // FIXME: get a provider for non-local MediaStreams (like from a remote peer).
    for (size_t i = 0; i < audioTracks.size(); ++i) {
        RefPtr<MediaStreamTrack> localAudio = audioTracks[i];
        if (localAudio->component()->audioSourceProvider()) {
            provider = localAudio->component()->audioSourceProvider();
            break;
        }
    }

    RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, provider);

    // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
    node->setFormat(2, sampleRate());

    refNode(node.get()); // context keeps reference until node is disconnected
    return node;
}
Code example #2
File: Delay.cpp  Project: EQ4/minim-cpp
void Minim::Delay::channelCountChanged()
{
	if ( delayBuffer )
	{
		delete[] delayBuffer;
        delayBuffer = NULL;
	}
	
	if ( sampleRate() > 0 )
	{
		maxBufferSize = (int)( maxDelayTime*sampleRate()*getAudioChannelCount() );
		delayBuffer = new float[maxBufferSize];
		memset(delayBuffer, 0, sizeof(float)*maxBufferSize);
		bufferSizeChanged();
	}
}
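The buffer math in channelCountChanged() is worth spelling out: the delay line stores interleaved samples, so it needs maxDelayTime seconds times the sample rate times the channel count floats. A standalone sketch of the arithmetic (the 2 s / 44.1 kHz / stereo values are assumptions for illustration, not from the source):

#include <cstdio>

int main()
{
    const float maxDelayTime = 2.0f;    // seconds (assumed)
    const float sampleRate   = 44100.f; // Hz (assumed)
    const int   channels     = 2;       // stereo (assumed)

    // 2 s * 44100 frames/s * 2 channels = 176400 floats (~689 KiB)
    const int maxBufferSize = (int)( maxDelayTime * sampleRate * channels );
    std::printf("delay buffer: %d floats\n", maxBufferSize);
    return 0;
}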
Code example #3
ssize_t AudioStreamInStub::read(void* buffer, ssize_t bytes)
{
    // fake timing for audio input
    usleep(bytes * 1000000 / sizeof(int16_t) / AudioSystem::popCount(channels()) / sampleRate());
    memset(buffer, 0, bytes);
    return bytes;
}
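The read() stub above fakes capture latency: it sleeps for as long as a real device would need to produce the requested bytes, i.e. the byte count divided by bytes-per-sample, channel count, and sample rate, scaled to microseconds. A worked instance of that formula with assumed parameters:

#include <cstdint>
#include <cstdio>

int main()
{
    const long long bytes      = 4096;  // request size (assumed)
    const long long channels   = 2;     // stereo (assumed)
    const long long sampleRate = 48000; // Hz (assumed)

    // 4096 B / 2 B per sample / 2 ch / 48000 Hz ~= 21333 us per read
    const long long micros = bytes * 1000000LL / (long long)sizeof(int16_t)
                             / channels / sampleRate;
    std::printf("sleep %lld us\n", micros);
    return 0;
}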
Code example #4
status_t AudioStreamInMotorola::dump(int fd, const Vector<String16>& args)
{
    const size_t SIZE = 256;
    char buffer[SIZE];
    String8 result;
    result.append("AudioStreamInMotorola::dump\n");
    snprintf(buffer, SIZE, "\tsample rate: %d\n", sampleRate());
    result.append(buffer);
    snprintf(buffer, SIZE, "\tbuffer size: %d\n", bufferSize());
    result.append(buffer);
    snprintf(buffer, SIZE, "\tchannels: %d\n", channels());
    result.append(buffer);
    snprintf(buffer, SIZE, "\tformat: %d\n", format());
    result.append(buffer);
    snprintf(buffer, SIZE, "\tmHardware: %p\n", mHardware);
    result.append(buffer);
    snprintf(buffer, SIZE, "\tmFd count: %d\n", mFd);
    result.append(buffer);
    snprintf(buffer, SIZE, "\tmStandby: %d\n", mStandby);
    result.append(buffer);
    snprintf(buffer, SIZE, "\tmRetryCount: %d\n", mRetryCount);
    result.append(buffer);
    ::write(fd, result.string(), result.size());
    return NO_ERROR;
}
Code example #5
PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionCode& ec)
{
    ASSERT(mediaStream);
    if (!mediaStream) {
        ec = INVALID_STATE_ERR;
        return 0;
    }

    ASSERT(isMainThread());
    lazyInitialize();

    AudioSourceProvider* provider = 0;

    MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();
    // FIXME: get a provider for non-local MediaStreams (like from a remote peer).
    for (size_t i = 0; i < audioTracks.size(); ++i) {
        RefPtr<MediaStreamTrack> localAudio = audioTracks[i];
        MediaStreamSource* source = localAudio->source();
        if (!source->deviceId().isEmpty()) {
            destination()->enableInput(source->deviceId());
            provider = destination()->localAudioInputProvider();
            break;
        }
    }

    RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, provider);

    // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
    node->setFormat(2, sampleRate());

    refNode(node.get()); // context keeps reference until node is disconnected
    return node;
}
Code example #6
double WaveShaperDSPKernel::latencyTime() const {
  size_t latencyFrames = 0;
  WaveShaperDSPKernel* kernel = const_cast<WaveShaperDSPKernel*>(this);

  switch (kernel->getWaveShaperProcessor()->oversample()) {
    case WaveShaperProcessor::OverSampleNone:
      break;
    case WaveShaperProcessor::OverSample2x:
      latencyFrames += m_upSampler->latencyFrames();
      latencyFrames += m_downSampler->latencyFrames();
      break;
    case WaveShaperProcessor::OverSample4x: {
      // Account for first stage upsampling.
      latencyFrames += m_upSampler->latencyFrames();
      latencyFrames += m_downSampler->latencyFrames();

      // Account for second stage upsampling, and divide by 2 to get back
      // down to the regular sample-rate.
      size_t latencyFrames2 =
          (m_upSampler2->latencyFrames() + m_downSampler2->latencyFrames()) / 2;
      latencyFrames += latencyFrames2;
      break;
    }
    default:
      ASSERT_NOT_REACHED();
  }

  return static_cast<double>(latencyFrames) / sampleRate();
}
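To make the bookkeeping concrete: if each sampler contributed, say, 64 frames apiece (an assumed figure, not one from the source), OverSample2x would report 64 + 64 = 128 frames, and OverSample4x would add (64 + 64) / 2 = 64 more for 192 frames; at a 44100 Hz context that is 192 / 44100 ≈ 4.4 ms of latency. The halving reflects that the second stage runs at twice the rate of the first, so its frames are worth half as much time at the context rate.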
Code example #7
double AudioBufferSourceNode::totalPitchRate()
{
    double dopplerRate = 1.0;
    if (m_pannerNode)
        dopplerRate = m_pannerNode->dopplerRate();

    // Incorporate buffer's sample-rate versus AudioContext's sample-rate.
    // Normally it's not an issue because buffers are loaded at the AudioContext's sample-rate, but we can handle it in any case.
    double sampleRateFactor = 1.0;
    if (buffer())
        sampleRateFactor = buffer()->sampleRate() / sampleRate();

    double basePitchRate = playbackRate()->value();

    double totalRate = dopplerRate * sampleRateFactor * basePitchRate;

    totalRate = std::max(-MaxRate, std::min(MaxRate, totalRate));

    bool isTotalRateValid = !std::isnan(totalRate) && !std::isinf(totalRate);
    ASSERT(isTotalRateValid);
    if (!isTotalRateValid)
        totalRate = 1.0;

    return totalRate;
}
Code example #8
double AudioBufferSourceNode::totalPitchRate()
{
    double dopplerRate = 1.0;
    if (m_pannerNode)
        dopplerRate = m_pannerNode->dopplerRate();

    // Incorporate buffer's sample-rate versus AudioContext's sample-rate.
    // Normally it's not an issue because buffers are loaded at the AudioContext's sample-rate, but we can handle it in any case.
    double sampleRateFactor = 1.0;
    if (buffer())
        sampleRateFactor = buffer()->sampleRate() / sampleRate();

    double basePitchRate = playbackRate()->value();

    double totalRate = dopplerRate * sampleRateFactor * basePitchRate;

    // Sanity check the total rate.  It's very important that the resampler not get any bad rate values.
    totalRate = std::max(0.0, totalRate);
    if (!totalRate)
        totalRate = 1; // zero rate is considered illegal
    totalRate = std::min(MaxRate, totalRate);

    bool isTotalRateValid = !std::isnan(totalRate) && !std::isinf(totalRate);
    ASSERT(isTotalRateValid);
    if (!isTotalRateValid)
        totalRate = 1.0;

    return totalRate;
}
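The difference from example #7 is only the clamping policy: #7 clamps the product into [-MaxRate, MaxRate], permitting reverse playback, while this version forces a strictly positive rate before capping at MaxRate. Either way the product itself is easy to verify by hand: a buffer recorded at 22050 Hz played in a 44100 Hz context with playbackRate 1.5 and no panner gives 1.0 * (22050 / 44100) * 1.5 = 0.75, so the node consumes source frames at three quarters of the context rate.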
Code example #9
void MediaStreamAudioSourceNode::setFormat(size_t numberOfChannels, float sourceSampleRate)
{
    if (numberOfChannels != m_sourceNumberOfChannels || sourceSampleRate != sampleRate()) {
        // The sample-rate must be equal to the context's sample-rate.
        if (!numberOfChannels || numberOfChannels > AudioContext::maxNumberOfChannels() || sourceSampleRate != sampleRate()) {
            // process() will generate silence for these uninitialized values.
            LOG(Media, "MediaStreamAudioSourceNode::setFormat(%u, %f) - unhandled format change", static_cast<unsigned>(numberOfChannels), sourceSampleRate);
            m_sourceNumberOfChannels = 0;
            return;
        }

        // Synchronize with process().
        MutexLocker locker(m_processLock);

        m_sourceNumberOfChannels = numberOfChannels;

        {
            // The context must be locked when changing the number of output channels.
            AudioContext::AutoLocker contextLocker(context());

            // Do any necessary re-configuration to the output's number of channels.
            output(0)->setNumberOfChannels(numberOfChannels);
        }
    }
}
Code example #10
void MediaElementAudioSourceHandler::process(size_t numberOfFrames) {
  AudioBus* outputBus = output(0).bus();

  // Use a tryLock() to avoid contention in the real-time audio thread.
  // If we fail to acquire the lock then the HTMLMediaElement must be in the
  // middle of reconfiguring its playback engine, so we output silence in this
  // case.
  MutexTryLocker tryLocker(m_processLock);
  if (tryLocker.locked()) {
    if (!mediaElement() || !m_sourceNumberOfChannels || !m_sourceSampleRate) {
      outputBus->zero();
      return;
    }
    AudioSourceProvider& provider = mediaElement()->getAudioSourceProvider();
    // Grab data from the provider so that the element continues to make
    // progress, even if we're going to output silence anyway.
    if (m_multiChannelResampler.get()) {
      DCHECK_NE(m_sourceSampleRate, sampleRate());
      m_multiChannelResampler->process(&provider, outputBus, numberOfFrames);
    } else {
      // Bypass the resampler completely if the source is at the context's
      // sample-rate.
      DCHECK_EQ(m_sourceSampleRate, sampleRate());
      provider.provideInput(outputBus, numberOfFrames);
    }
    // Output silence if we don't have access to the element.
    if (!passesCORSAccessCheck()) {
      if (m_maybePrintCORSMessage) {
        // Print a CORS message, but just once for each change in the current
        // media element source, and only if we have a document to print to.
        m_maybePrintCORSMessage = false;
        if (context()->getExecutionContext()) {
          context()->getExecutionContext()->postTask(
              BLINK_FROM_HERE,
              createCrossThreadTask(
                  &MediaElementAudioSourceHandler::printCORSMessage,
                  PassRefPtr<MediaElementAudioSourceHandler>(this),
                  m_currentSrcString));
        }
      }
      outputBus->zero();
    }
  } else {
    // We failed to acquire the lock.
    outputBus->zero();
  }
}
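The tryLock-or-silence idiom in process() is the standard way to keep a real-time render callback from ever blocking on the main thread. A minimal standalone sketch of the same pattern with std::mutex (hypothetical names, not the Blink API):

#include <algorithm>
#include <mutex>
#include <vector>

std::mutex gProcessLock; // held by the main thread while reconfiguring

// Hypothetical render callback: never block; emit silence on contention.
void renderQuantum(std::vector<float>& bus)
{
    std::unique_lock<std::mutex> lock(gProcessLock, std::try_to_lock);
    if (!lock.owns_lock()) {
        std::fill(bus.begin(), bus.end(), 0.0f); // silence, deadline kept
        return;
    }
    // ... safe to read shared source state and fill `bus` here ...
}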
Code example #11
File: MoogFilter.cpp  Project: EQ4/minim-cpp
void Minim::MoogFilter::uGenerate( float * out, const int numChannels )
{
	// Set coefficients given frequency & resonance [0.0...1.0]
	float t1, t2; // temporary buffers
	float normFreq = frequency.getLastValue() / ( sampleRate() * 0.5f );
	float rez = constrain( resonance.getLastValue(), 0.f, 1.f );
	
	float q = 1.0f - normFreq;
	float p = normFreq + 0.8f * normFreq * q;
	float f = p + p - 1.0f;
	q = rez * ( 1.0f + 0.5f * q * ( 1.0f - q + 5.6f * q * q ) );
	
	const float* input = audio.getLastValues();
	
	for ( int i = 0; i < numChannels; ++i )
	{
		// Filter (in [-1.0...+1.0])
		float* b = coeff[i];
		float in = constrain( input[i], -1, 1 ); // hard clip
		//float in = atanf(input[i]) * M_2_PI; // soft clip
		
		in -= q * b[4]; // feedback
		
		t1 = b[1];
		b[1] = ( in + b[0] ) * p - b[1] * f;
		
		t2 = b[2];
		b[2] = ( b[1] + t1 ) * p - b[2] * f;
		
		t1 = b[3];
		b[3] = ( b[2] + t2 ) * p - b[3] * f;
		
		b[4] = ( b[3] + t1 ) * p - b[4] * f;
		b[4] = b[4] - b[4] * b[4] * b[4] * 0.166667f; // clipping
        
        // inelegantly squash denormals
        if ( isnan(b[4]) )
        {
            memset(b, 0, sizeof(float)*5);
        }
		
		b[0] = in;
        
        switch( type )
        {
            case HP:
                out[i] = in - b[4];
                break;
                
            case LP:
                out[i] = b[4];
                break;
                
            case BP:
                out[i] = 3.0f * (b[3] - b[4]);
                break;
        }
	}
}
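Note that the cutoff is normalized against Nyquist (sampleRate() * 0.5), not the full sample rate: a 1 kHz cutoff at 44.1 kHz gives normFreq = 1000 / 22050 ≈ 0.045, which keeps the coefficient formulas for p, f and q operating on the expected [0.0...1.0] range.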
Code example #12
File: AudioFormat.cpp  Project: DavidJohnSmith/QtAV
/*!
    Returns the number of microseconds represented by \a bytes in this format.

    Returns 0 if this format is not valid.

    Note that some rounding may occur if \a bytes is not an exact multiple
    of the number of bytes per frame.

    \sa bytesForDuration()
*/
qint64 AudioFormat::durationForBytes(qint32 bytes) const
{
    if (!isValid() || bytes <= 0)
        return 0;

    // We round the byte count to ensure whole frames
    return qint64(kHz * (bytes / bytesPerFrame())) / sampleRate();
}
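Assuming kHz here is the microseconds-per-second constant (1000000), the formula is easy to check by hand: for 16-bit stereo at 44100 Hz, bytesPerFrame() is 4, so 88200 bytes truncates to 22050 whole frames and the function returns 1000000 * 22050 / 44100 = 500000 µs, i.e. exactly half a second.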
Code example #13
File: ADSREnvOld.cpp  Project: dreieier/Nexus
void ADSREnvOld::setReleaseTime( FLOAT time )
{
	if( time < 0.0 ) {
		negativeRatesError();
		time *= -1;
	}
	releaseRate_ = sustainLevel_ / ( time * sampleRate() );
}
Code example #14
File: ADSREnvOld.cpp  Project: dreieier/Nexus
void ADSREnvOld::setAttackTime( FLOAT time )
{
	if( time < 0.0 ) {
		negativeRatesError();
		time *= -1;
	}
	attackRate_ = 1.0f / ( time * sampleRate() );
}
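Both setters compute per-sample increments: setAttackTime() yields how much the envelope must rise each sample to go from 0 to 1 in `time` seconds, and setReleaseTime() (example #13) how much it must fall per sample to cover sustainLevel_ in the same way. For a 10 ms attack at a 44100 Hz sample rate, attackRate_ = 1 / (0.01 * 44100) ≈ 0.00227 per sample, so the envelope reaches full level after 441 samples.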
Code example #15
void DynamicsCompressorHandler::initialize()
{
    if (isInitialized())
        return;

    AudioHandler::initialize();
    m_dynamicsCompressor = wrapUnique(new DynamicsCompressor(sampleRate(), defaultNumberOfOutputChannels));
}
Code example #16
void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback, ExceptionState& es)
{
    if (!audioData) {
        es.throwDOMException(SyntaxError);
        return;
    }
    m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
}
Code example #17
File: HRTFPanner.cpp  Project: mirror/chromium
double HRTFPanner::tailTime() const {
  // Because HRTFPanner is implemented with a DelayKernel and a FFTConvolver,
  // the tailTime of the HRTFPanner is the sum of the tailTime of the
  // DelayKernel and the tailTime of the FFTConvolver, which is
  // MaxDelayTimeSeconds and fftSize() / 2, respectively.
  return MaxDelayTimeSeconds +
         (fftSize() / 2) / static_cast<double>(sampleRate());
}
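As a worked instance (the FFT size is an assumed value, not one from the source): with a 256-point FFT at 44100 Hz, the convolver contributes 128 / 44100 ≈ 2.9 ms of tail on top of MaxDelayTimeSeconds.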
Code example #18
void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback, ExceptionCode& ec)
{
    if (!audioData) {
        ec = SYNTAX_ERR;
        return;
    }
    m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
}
Code example #19
ssize_t AudioStreamInStub::read(void* buffer, ssize_t bytes)
{
    // fake timing for audio input
    usleep(bytes * 1000000 / sizeof(int16_t) /
           audio_channel_count_from_in_mask(channels()) / sampleRate());
    memset(buffer, 0, bytes);
    return bytes;
}
Code example #20
void DynamicsCompressorNode::initialize()
{
    if (isInitialized())
        return;

    AudioNode::initialize();
    m_dynamicsCompressor = adoptPtr(new DynamicsCompressor(sampleRate(), defaultNumberOfOutputChannels));
}
Code example #21
status_t AudioStreamOutStub::set(int *pFormat, uint32_t *pChannels, uint32_t *pRate)
{
    if (pFormat) *pFormat = format();
    if (pChannels) *pChannels = channels();
    if (pRate) *pRate = sampleRate();

    return NO_ERROR;
}
Code example #22
File: ADSREnvOld.cpp  Project: dreieier/Nexus
void ADSREnvOld::setDecayTime( FLOAT time )
{
	if( time < 0.0 ) {
		negativeRatesError();
		time *= -1;
	}
	decayRate_ = 1.0f / ( time * sampleRate() );
}
Code example #23
File: StereoPannerNode.cpp  Project: mirror/chromium
void StereoPannerHandler::initialize() {
  if (isInitialized())
    return;

  m_stereoPanner = StereoPanner::create(sampleRate());

  AudioHandler::initialize();
}
Code example #24
void StereoPannerHandler::initialize()
{
    if (isInitialized())
        return;

    m_stereoPanner = Spatializer::create(Spatializer::PanningModelEqualPower, sampleRate());

    AudioHandler::initialize();
}
Code example #25
void PannerNode::initialize()
{
    if (isInitialized())
        return;

    m_panner = Panner::create(m_panningModel, sampleRate(), context()->hrtfDatabaseLoader());

    AudioNode::initialize();
}
Code example #26
double ConvolverNode::latencyTime() const
{
    MutexTryLocker tryLocker(m_processLock);
    if (tryLocker.locked())
        return m_reverb ? m_reverb->latencyFrames() / static_cast<double>(sampleRate()) : 0;
    // Since we don't want to block the Audio Device thread, we return a large value
    // instead of trying to acquire the lock.
    return std::numeric_limits<double>::infinity();
}
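This pairs the frames-to-seconds conversion seen throughout these examples with the same non-blocking discipline as example #10, except that on contention it reports an infinite latency rather than silence. As a worked instance: a reverb reporting 8192 frames of latency in a 48000 Hz context yields 8192 / 48000 ≈ 0.171 s.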
Code example #27
status_t A2dpAudioInterface::A2dpAudioStreamOut::set(
        int format, int channels, uint32_t rate)
{
    LOGD("A2dpAudioStreamOut::set %d, %d, %d\n", format, channels, rate);

    // fix up defaults
    if (format == 0) format = AudioSystem::PCM_16_BIT;
    if (channels == 0) channels = channelCount();
    if (rate == 0) rate = sampleRate();

    // check values
    if ((format != AudioSystem::PCM_16_BIT) ||
            (channels != channelCount()) ||
            (rate != sampleRate()))
        return BAD_VALUE;

    return NO_ERROR;
}
Code example #28
File: PannerNode.cpp  Project: harlanlewis/webkit
void PannerNode::initialize()
{
    if (isInitialized())
        return;
        
    m_panner = Panner::create(m_panningModel, sampleRate());

    AudioNode::initialize();
}
Code example #29
GainNode* AudioContext::createGain(ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    if (isContextClosed()) {
        throwExceptionForClosedState(exceptionState);
        return nullptr;
    }

    return GainNode::create(this, sampleRate());
}
Code example #30
File: PannerNode.cpp  Project: eswartz/LabSound
void PannerNode::initialize()
{
    if (isInitialized())
        return;
    
	switch (m_panningModel) 
	{
		case PanningMode::EQUALPOWER: 
			m_panner = std::unique_ptr<Panner>(new EqualPowerPanner(sampleRate()));
			break;
		case PanningMode::HRTF: 
			m_panner = std::unique_ptr<Panner>(new HRTFPanner(sampleRate()));
			break;
		default:
			throw std::runtime_error("invalid panning model");
	}

    AudioNode::initialize();
}