Example #1
AudioMixer::AudioMixer(Profile *profile)
	: QObject()
	, m_profile(profile)
	, m_inputs()
	, m_metronomeEnabledRef(0)

	// Master state
	, m_outStats(getSampleRate(), getNumChannels())
	, m_masterVolume(1.0f)
	, m_masterAttenuation(0)

	// Synchronisation
	, m_syncedInputs()
	, m_refTimestampUsec(App->getUsecSinceFrameOrigin())
	, m_minInputDelayUsec(0)
	, m_sampleNum(0)
{
	// We process audio at the same frequency as the video framerate as it
	// allows us to do more bulk processing
	connect(App, &Application::realTimeFrameEvent,
		this, &AudioMixer::realTimeFrameEvent);

	// Watch profile for audio mode changes
	connect(profile, &Profile::audioModeChanged,
		this, &AudioMixer::audioModeChanged);
}
Example #2
bool RHD2000Thread::enableHeadstage(int hsNum, bool enabled)
{

    evalBoard->enableDataStream(hsNum, enabled);

    if (enabled)
    {
        numChannelsPerDataStream.set(hsNum, 32);
    }
    else
    {
        numChannelsPerDataStream.set(hsNum, 0);
    }

    std::cout << "Enabled channels: ";

    for (int i = 0; i < MAX_NUM_DATA_STREAMS; i++)
    {
        std::cout << numChannelsPerDataStream[i] << " ";
    }

    std::cout << std::endl;

    dataBuffer->resize(getNumChannels(), 10000);

    return true;
}
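For context, a minimal sketch of how the getNumChannels() call above could total the per-stream counts this function maintains. This is an assumption for illustration only, not the actual Open Ephys implementation; the fixed ADC count and the use of acquireAdcChannels (see Example #4) are likewise assumed.

// Hedged sketch: total channels across all data streams (names assumed).
int RHD2000Thread::getNumChannels()
{
    int total = 0;
    for (int i = 0; i < MAX_NUM_DATA_STREAMS; i++)
        total += numChannelsPerDataStream[i];

    if (acquireAdcChannels)
        total += 8; // assumed fixed ADC channel count

    return total;
}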
Example #3
void RecordBufUGenInternal::processBlock(bool& shouldDelete, const unsigned int blockID, const int /*channel*/) throw()
{
	const int blockSize = uGenOutput.getBlockSize();
	
	int channelBufferPos = 0;
	
	for(int channel = 0; channel < getNumChannels(); channel++)
	{
		int numSamplesToProcess = blockSize;
		const int bufferSize = buffer_.size();
		channelBufferPos = bufferPos;
		float* outputSamples = proxies[channel]->getSampleData();
		float* inputSamples = inputs[Input].processBlock(shouldDelete, blockID, channel);
		float* preLevelSamples = inputs[PreLevel].processBlock(shouldDelete, blockID, channel);
		float* recLevelSamples = inputs[RecLevel].processBlock(shouldDelete, blockID, channel);
		float* loopSamples = inputs[Loop].processBlock(shouldDelete, blockID, 0);
		float* bufferSamples = buffer_.getData(channel);
		
		while(numSamplesToProcess) 
		{
			if(*loopSamples++ >= 0.5f) 
			{
				// looping: wrap the write position before and after each sample
				if(channelBufferPos >= bufferSize)
					channelBufferPos = 0;
				
				float recLevel = *recLevelSamples++;
				float rec = *inputSamples++ * recLevel;
				float preLevel = *preLevelSamples++;
				float pre = bufferSamples[channelBufferPos] * preLevel;
				float out = pre + rec;					// mix existing buffer content with the input
				*outputSamples++ = out;
				bufferSamples[channelBufferPos] = out;	// write the mix back for subsequent passes
				channelBufferPos++;
				
				if(channelBufferPos >= bufferSize)
					channelBufferPos = 0;
			}
			else
			{
				// one-shot: same pre/rec mix, but without wrapping; hitting the
				// end of the buffer is detected below and flagged via setIsDone()
				float rec = *inputSamples++ * *recLevelSamples++;
				float pre = bufferSamples[channelBufferPos] * *preLevelSamples++;
				float out = pre + rec;
				*outputSamples++ = out;
				bufferSamples[channelBufferPos] = out;
				channelBufferPos++;
			}
			
			--numSamplesToProcess;
		}
	}
	
	bufferPos = channelBufferPos;
	
	if(bufferPos >= buffer_.size())
	{
		shouldDelete = shouldDelete ? true : shouldDeleteValue;
		setIsDone();
	}	
}
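The write equation above, out = old * preLevel + in * recLevel, is what lets this one UGen act as a plain recorder, an overdubber, or a decaying feedback looper. A standalone illustration of the arithmetic (the helper name is made up; no ugen++ types involved):

// preLevel = 0, recLevel = 1  -> overwrite (plain recording)
// preLevel = 1, recLevel = 1  -> overdub (old + new)
// 0 < preLevel < 1            -> each pass decays, feedback-looper style
float writeSample(float oldSample, float input, float preLevel, float recLevel)
{
	return oldSample * preLevel + input * recLevel;
}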
Example #4
void RHD2000Thread::enableAdcs(bool t)
{

    acquireAdcChannels = t;

    dataBuffer->resize(getNumChannels(), 10000);

}
Example #5
void BufferRecorderNode::writeToFile( const fs::path &filePath, SampleType sampleType )
{
	size_t currentWritePos = mWritePos;
	BufferRef copiedBuffer = getRecordedCopy();

	audio::TargetFileRef target = audio::TargetFile::create( filePath, getSampleRate(), getNumChannels(), sampleType );
	target->write( copiedBuffer.get(), currentWritePos );
}
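A hedged usage sketch of the surrounding workflow, following the Cinder samples: route a device input into a BufferRecorderNode, record, then flush to disk with the function above. The node setup and output path are assumptions.

auto ctx = ci::audio::Context::master();
auto input = ctx->createInputDeviceNode();
auto recorder = ctx->makeNode( new ci::audio::BufferRecorderNode );

input >> recorder;   // the recorder is pulled on its own, no output connection needed
input->enable();
ctx->enable();
recorder->start();

// ... later, once enough has been recorded:
recorder->writeToFile( "recording.wav", ci::audio::SampleType::INT_16 );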
Example #6
void OggAudioSource::setDecoderPosition(Int64 startFrame)
{
	RScopedLock l(&mDecodeLock);

	ov_pcm_seek(&mOggFile, startFrame * getNumChannels());
	if(startFrame < getLength() * getSampleRate())
		mEOF = false;
}
Example #7
void MonitorNode::process( Buffer *buffer )
{
	size_t numFrames = std::min( buffer->getNumFrames(), mRingBuffers[0].getSize() );
	for( size_t ch = 0; ch < getNumChannels(); ch++ ) {
		if( ! mRingBuffers[ch].write( buffer->getChannel( ch ), numFrames ) )
			return;
	}
}
Example #8
bool AudioFormatWriter::writeFromAudioSource (AudioSource& source,
                                              int numSamplesToRead,
                                              const int samplesPerBlock)
{
    const int maxChans = 128;
    AudioSampleBuffer tempBuffer (getNumChannels(), samplesPerBlock);
    int* buffers [maxChans];

    while (numSamplesToRead > 0)
    {
        const int numToDo = jmin (numSamplesToRead, samplesPerBlock);

        AudioSourceChannelInfo info;
        info.buffer = &tempBuffer;
        info.startSample = 0;
        info.numSamples = numToDo;
        info.clearActiveBufferRegion();

        source.getNextAudioBlock (info);

        int i;
        for (i = maxChans; --i >= 0;)
            buffers[i] = 0;

        for (i = tempBuffer.getNumChannels(); --i >= 0;)
            buffers[i] = (int*) tempBuffer.getSampleData (i, 0);

        if (! isFloatingPoint())
        {
            int** bufferChan = buffers;

            while (*bufferChan != 0)
            {
                int* b = *bufferChan++;

                // float -> int
                for (int j = numToDo; --j >= 0;)
                {
                    const double samp = *(const float*) b;

                    if (samp <= -1.0)
                        *b++ = INT_MIN;
                    else if (samp >= 1.0)
                        *b++ = INT_MAX;
                    else
                        *b++ = roundToInt (INT_MAX * samp);
                }
            }
        }

        if (! write ((const int**) buffers, numToDo))
            return false;

        numSamplesToRead -= numToDo;
    }

    return true;
}
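A hedged usage sketch for the routine above, against the same era of the JUCE API (the output file, source, and sample count are placeholders; the writer takes ownership of the stream passed to it):

WavAudioFormat wavFormat;
AudioFormatWriter* writer = wavFormat.createWriterFor (outputFile.createOutputStream(),
                                                       44100.0,            // sample rate
                                                       2,                  // channels
                                                       16,                 // bits per sample
                                                       StringPairArray(),  // metadata
                                                       0);                 // quality option
if (writer != 0)
{
    writer->writeFromAudioSource (mySource, numSamplesToWrite, 512);
    delete writer; // also closes and deletes the output stream
}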
Example #9
void StereoSample::addSamplesStereo(float *samples, int length) {

	if(length > BUFF_SIZE) {
		printf("Buffer not big enough!!\n");
		memset(samples, 0, length*2*sizeof(float));
		return;
	}

	// get the samples to work on
	getSamples(StereoSample_buffer, length);
	float lerpAmt = 0.05f;

	if(fadePos > -1) {
		targetVolume = getFadeVolume();
		fadePos += length;
	}

	if(getNumChannels() == 1) {
		for(int i = 0; i < length; i++) {
			// smooth volume and pan towards their targets
			volume = volume*(1.f - lerpAmt) + targetVolume*lerpAmt;
			pan = pan*(1.f - lerpAmt) + targetPan*lerpAmt;

			samples[i*2] += StereoSample_buffer[i]*(1 - pan)*volume;
			samples[i*2 + 1] += StereoSample_buffer[i]*pan*volume;
		}
	} else if(getNumChannels() == 2) {
		for(int i = 0; i < length; i++) {
			volume = volume*(1.f - lerpAmt) + targetVolume*lerpAmt;
			pan = pan*(1.f - lerpAmt) + targetPan*lerpAmt;

			samples[i*2] += StereoSample_buffer[i*2]*(1 - pan)*volume;
			samples[i*2 + 1] += StereoSample_buffer[i*2 + 1]*pan*volume;
		}
	}
	if(fadePos >= fadeDuration) {
		fadePos = -1;
		if(targetVolume < 0.001 && volume < 0.001) {
			stop();
		}
	}
}
Example #10
bool RawInputUGenInternal::setInput(const float* block, const int channel) throw() 
{ 
	ugen_assert(channel >= 0);
	ugen_assert(channel < getNumChannels());
	ugen_assert(block != 0);
	
	bufferData[channel] = block; 
	return true;
}
Example #11
akAnimationClip::~akAnimationClip()
{
	akAnimationChannel** ptr = m_channels.ptr();
	int len = getNumChannels(), i;
	for (i = 0; i < len; ++i)
		delete ptr[i];
	
	m_channels.clear();
}
Example #12
ATMO_BOOL CMoMoConnection::CreateDefaultMapping(CAtmoChannelAssignment *ca)
{
   if(!ca) return ATMO_FALSE;
   ca->setSize( getNumChannels() );  // or 4? depending on config!
   ca->setZoneIndex(0, 0); // Zone 5
   ca->setZoneIndex(1, 1);
   ca->setZoneIndex(2, 2);
   ca->setZoneIndex(3, 3);
   return ATMO_TRUE;
}
Example #13
void BufferRecorderNode::initialize()
{
	// adjust recorder buffer to match channels once initialized, since they could have changed since construction.
	bool resize = mRecorderBuffer.getNumFrames() != 0;
	mRecorderBuffer.setNumChannels( getNumChannels() );

	// if the buffer had already been resized, zero out any possibly existing data.
	if( resize )
		mRecorderBuffer.zero();
}
Example #14
// ----------------------------------------------------------------------------
//
bool Fixture::setPhysicalChannels( PhysicalChannels& channel_map ) {
    if ( channel_map.size() == 0 )
        m_channel_map.clear();
    else if ( channel_map.size() == getNumChannels() )
        m_channel_map = channel_map;
    else
        return false;

    return true;
}
Example #15
void akAnimationClip::evaluate(akSkeletonPose* pose, akScalar time, akScalar weight, akScalar delta, const akJointMask* mask) const
{
	akAnimationChannel* const* ptr = m_channels.ptr();
	int len = getNumChannels();
	
	for(int i=0; i<len; i++)
	{
		ptr[i]->evaluate(*pose, time, weight, delta, mask);
	}
}
Example #16
unsigned char* OpenCVImage::setData(unsigned char* data, bool ownership)
{
    m_own = ownership;
    unsigned char* tmp = getData();

    int channels = getNumChannels();

    cvSetImageData(m_img, data, cvGetSize(m_img).width * channels);
    return tmp;
}
Example #17
// TODO: When getNumChannels() > 1, use generic channel converter.
// - alternatively, this tap can force mono output, which only works if it isn't a tap but is really a leaf node (no output).
const std::vector<float>& MonitorSpectralNode::getMagSpectrum()
{
	uint64_t numFramesProcessed = getContext()->getNumProcessedFrames();
	if( mLastFrameMagSpectrumComputed == numFramesProcessed )
		return mMagSpectrum;

	mLastFrameMagSpectrumComputed = numFramesProcessed;

	fillCopiedBuffer();

	// window the copied buffer and compute forward FFT transform
	if( getNumChannels() > 1 ) {
		// naive average of all channels
		mFftBuffer.zero();
		float scale = 1.0f / getNumChannels();
		for( size_t ch = 0; ch < getNumChannels(); ch++ ) {
			for( size_t i = 0; i < mWindowSize; i++ )
				mFftBuffer[i] += mCopiedBuffer.getChannel( ch )[i] * scale;
		}
		dsp::mul( mFftBuffer.getData(), mWindowingTable.get(), mFftBuffer.getData(), mWindowSize );
	}
	else
		dsp::mul( mCopiedBuffer.getData(), mWindowingTable.get(), mFftBuffer.getData(), mWindowSize );

	mFft->forward( &mFftBuffer, &mBufferSpectral );

	float *real = mBufferSpectral.getReal();
	float *imag = mBufferSpectral.getImag();

	// remove Nyquist component
	imag[0] = 0.0f;

	// compute normalized magnitude spectrum
	// TODO: break this into vector cartesian -> polar and then vector lowpass. skip lowpass if smoothing factor is very small
	const float magScale = 1.0f / mFft->getSize();
	for( size_t i = 0; i < mMagSpectrum.size(); i++ ) {
		float re = real[i];
		float im = imag[i];
		mMagSpectrum[i] = mMagSpectrum[i] * mSmoothingFactor + std::sqrt( re * re + im * im ) * magScale * ( 1 - mSmoothingFactor );
	}

	return mMagSpectrum;
}
Example #18
bool ExtAudioFileAudioSource::init(const RString& path, bool loadIntoMemory)
{
	if(mLoadedInMemory && loadIntoMemory)
		return true;
	
    // FIXME: query the file to find out how many channels instead of hardcoding.
    
    CFURLRef path_url = CFURLCreateFromFileSystemRepresentation (kCFAllocatorDefault,
                                                                 (const UInt8*)path.data(),
                                                                 path.length(),
                                                                 false
                                                                 );
    
    OSStatus err = ExtAudioFileOpenURL(path_url, &mAudioFile);
    CFRelease(path_url); // the URL is no longer needed once the open call returns
    if(err)
    {
        printCode("ExtAudioFileOpenURL: ", err);
        return false;
    }
    
    UInt32 propSize = sizeof(mClientFormat);
    err = ExtAudioFileGetProperty(mAudioFile, kExtAudioFileProperty_FileDataFormat, &propSize, &mClientFormat);    

    propSize = sizeof(mTotalFrames);
    err = ExtAudioFileGetProperty(mAudioFile, kExtAudioFileProperty_FileLengthFrames, &propSize, &mTotalFrames);    
    
    // Setup output format
    FillOutASBDForLPCM(mFormat, getSampleRate(), getNumChannels(), 32, 32, true, false,true);
    
    err = ExtAudioFileSetProperty(mAudioFile, kExtAudioFileProperty_ClientDataFormat, sizeof(mFormat), &mFormat);
    if(err)
    {
        printCode("ExtAudioFileSetProperty: ", err);
        return false;
    }
    
    // Allocate our buffer list with one AudioBuffer per channel.
    mpBufferList = (AudioBufferList *)malloc(sizeof(AudioBufferList) + (getNumChannels() - 1) * sizeof(AudioBuffer));
    mpBufferList->mNumberBuffers = getNumChannels();

    return BufferedAudioSource::init(path, loadIntoMemory);
}
Example #19
ATMO_BOOL CAtmoMultiConnection::CreateDefaultMapping(CAtmoChannelAssignment *ca)
{
  if(!ca) return ATMO_FALSE;
  int z = getNumChannels();
  ca->setSize( z );
  // suggest a 1 : 1 mapping...
  for(int i = 0; i < z ; i++ ) {
      ca->setZoneIndex( i, i );
  }
  return ATMO_TRUE;
}
Example #20
void DelayNode::setMaxDelaySeconds( float seconds )
{
	size_t delayFrames = lroundf( seconds * getSampleRate() );

	size_t delayBufferFrames = max( getFramesPerBlock(), delayFrames ) + 1;

	mDelayBuffer.setSize( delayBufferFrames, getNumChannels() );
	
	mMaxDelaySeconds = seconds;
	mWriteIndex = 0;
}
Example #21
ALboolean
_alutGetFormat (const BufferData *bufferData, ALenum *format)
{
  if (!_alutFormatConstruct
      (getNumChannels (bufferData), getBitsPerSample (bufferData), format))
    {
      _alutSetError (ALUT_ERROR_UNSUPPORTED_FILE_SUBTYPE);
      return AL_FALSE;
    }
  return AL_TRUE;
}
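_alutFormatConstruct maps a (channels, bits-per-sample) pair onto an AL_FORMAT_* enum. A minimal sketch of such a mapping using the standard OpenAL constants; this is an illustration, not ALUT's actual internals:

static ALboolean
formatConstruct (ALint numChannels, ALint bitsPerSample, ALenum *format)
{
  if (numChannels == 1 && bitsPerSample == 8)  { *format = AL_FORMAT_MONO8;    return AL_TRUE; }
  if (numChannels == 1 && bitsPerSample == 16) { *format = AL_FORMAT_MONO16;   return AL_TRUE; }
  if (numChannels == 2 && bitsPerSample == 8)  { *format = AL_FORMAT_STEREO8;  return AL_TRUE; }
  if (numChannels == 2 && bitsPerSample == 16) { *format = AL_FORMAT_STEREO16; return AL_TRUE; }
  return AL_FALSE;              /* caller raises ALUT_ERROR_UNSUPPORTED_FILE_SUBTYPE */
}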
Example #22
void MultiSliderUGenInternal::timerCallback()
{
	for(int i = 0; i < getNumChannels(); i++)
	{
#ifdef __JUCE_NOTIFICATIONTYPE_JUCEHEADER__
        sliders->getSlider(i)->setValue(values[i], sendNotificationAsync);
#else
		sliders->getSlider(i)->setValue(values[i], true, false);
#endif
	}
}
Example #23
void DsdSampleReader::clearBuffer() 
{
	if (!isBufferAllocated) {
		allocateBuffer();
		return;
	}
	
	dsf2flac_uint8 c = getIdleSample();
	for (dsf2flac_uint32 i = 0; i<getNumChannels(); i++)
		for (dsf2flac_uint32 j=0; j<getBufferLength(); j++)
			circularBuffers[i].push_front(c);
}
Example #24
void OutputDeviceNodeXAudio::initialize()
{
	CI_ASSERT_MSG( getNumChannels() <= 2, "number of channels greater than 2 is not supported." );

	auto internalBuffer = getInternalBuffer();
	size_t numSamples = internalBuffer->getSize();

	memset( &mXAudioBuffer, 0, sizeof( mXAudioBuffer ) );
	mXAudioBuffer.AudioBytes = numSamples * sizeof( float );
	if( getNumChannels() == 2 ) {
		// setup stereo, XAudio2 requires interleaved samples so point at interleaved buffer
		mBufferInterleaved = BufferInterleaved( internalBuffer->getNumFrames(), internalBuffer->getNumChannels() );
		mXAudioBuffer.pAudioData = reinterpret_cast<BYTE *>( mBufferInterleaved.getData() );
	}
	else {
		// setup mono
		mXAudioBuffer.pAudioData = reinterpret_cast<BYTE *>( internalBuffer->getData() );
	}

	initSourceVoice();
}
Example #25
template<typename PixelType>
ofImageType ofPixels_<PixelType>::getImageType() const{
	switch(getNumChannels()){
	case 1:
		return OF_IMAGE_GRAYSCALE;
	case 3:
		return OF_IMAGE_COLOR;
	case 4:
		return OF_IMAGE_COLOR_ALPHA;
	default:
		return OF_IMAGE_UNDEFINED;
	}
}
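A hedged usage sketch: branching on the reported type after loading an image. ofLoadImage and the OF_IMAGE_* constants are standard openFrameworks; the file name is a placeholder.

ofPixels pix;
if (ofLoadImage(pix, "photo.png")) {
	switch (pix.getImageType()) {
		case OF_IMAGE_GRAYSCALE:   /* 1 channel  */ break;
		case OF_IMAGE_COLOR:       /* 3 channels */ break;
		case OF_IMAGE_COLOR_ALPHA: /* 4 channels */ break;
		default: ofLogError() << "unsupported image type"; break;
	}
}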
Example #26
void akAnimationClip::evaluate(akTransformState* pose, akScalar time, akScalar weight, akScalar delta) const
{
	akAnimationChannel* const* ptr = m_channels.ptr();
	int len = getNumChannels();
	
	for(int i=0; i<len; i++)
	{
		if(ptr[i]->getType() == akAnimationChannel::AC_TRANSFORM)
		{
			ptr[i]->evaluate(*pose, time, weight, delta);
		}
	}
}
Example #27
void OutputDeviceNodeXAudio::initSourceVoice()
{
	CI_ASSERT( ! mSourceVoice );

	auto context = dynamic_pointer_cast<ContextXAudio>( getContext() );

	auto wfx = msw::interleavedFloatWaveFormat( getSampleRate(), getNumChannels() );

	IXAudio2 *xaudio = context->getXAudio();
	UINT32 flags = ( mFilterEnabled ? XAUDIO2_VOICE_USEFILTER : 0 );
	HRESULT hr = xaudio->CreateSourceVoice( &mSourceVoice, wfx.get(), flags, XAUDIO2_DEFAULT_FREQ_RATIO, mVoiceCallback.get() );
	CI_ASSERT( hr == S_OK );
}
Example #28
void BankBaseUGenInternal::processBlock(bool& shouldDelete, const unsigned int blockID, const int /*channel*/) throw()
{	
	const int numSamplesToProcess = uGenOutput.getBlockSize();	
	const int numChannels = getNumChannels();
	for(int channel = 0; channel < numChannels; channel++)
	{
		bufferData[channel] = proxies[channel]->getSampleData();
	}
	
	mixer.prepareForBlock(numSamplesToProcess, blockID, -1);
	mixer.setOutputs(bufferData, numSamplesToProcess, numChannels);
	mixer.processBlock(shouldDelete, blockID, -1);
}
Example #29
void BufferValuesUGenInternal::processBlock(bool& /*shouldDelete*/, 
											const unsigned int /*blockID*/, 
											const int /*channel*/) throw()
{
	for(int channel = 0; channel < getNumChannels(); channel++)
	{
		int numSamplesToProcess = uGenOutput.getBlockSize();
		float* outputSamples = proxies[channel]->getSampleData();
		float value = buffer[channel];
		
		while(numSamplesToProcess--) *outputSamples++ = value;
	}
}
Example #30
void OutputDeviceNodeWebAudio::initialize()
{
	CI_LOG_I( "OutputDeviceNodeWebAudio::initialize() called" );

	const size_t sampleRate = getOutputSampleRate();
	const size_t framesPerBlock = getOutputFramesPerBlock();
	const size_t numChannels = getNumChannels();

	mInterleavedBuffer = BufferInterleaved( framesPerBlock, numChannels );

	auto functor = std::bind( &OutputDeviceNodeWebAudio::renderInputs, this, std::placeholders::_1 );
	mImpl->setRenderFunction( functor );
}