void AudioOss::applyQualitySettings()
{
    if( hqAudio() )
    {
        setSampleRate( Engine::mixer()->processingSampleRate() );

        unsigned int value = sampleRate();
        if ( ioctl( m_audioFD, SNDCTL_DSP_SPEED, &value ) < 0 )
        {
            perror( "SNDCTL_DSP_SPEED" );
            printf( "Couldn't set audio frequency\n" );
            return;
        }
        if( value != sampleRate() )
        {
            value = mixer()->baseSampleRate();
            if ( ioctl( m_audioFD, SNDCTL_DSP_SPEED, &value ) < 0 )
            {
                perror( "SNDCTL_DSP_SPEED" );
                printf( "Couldn't set audio frequency\n" );
                return;
            }
            setSampleRate( value );
        }
    }

    AudioDevice::applyQualitySettings();
}
bool FileInputDataSource::open()
{
    if (!mFp) {
        unsigned int channel;
        unsigned int sampleRate;
        audio_format_type_t pcmFormat;
        audio_type_t audioType;

        mFp = fopen(mDataPath.c_str(), "rb");
        if (!mFp) {
            medvdbg("file open failed error : %d\n", errno);
            return false;
        }

        audioType = utils::getAudioTypeFromPath(mDataPath);
        setAudioType(audioType);

        switch (audioType) {
        case AUDIO_TYPE_MP3:
        case AUDIO_TYPE_AAC:
            if (!utils::header_parsing(mFp, audioType, &channel, &sampleRate, NULL)) {
                medvdbg("header parsing failed\n");
                return false;
            }
            setSampleRate(sampleRate);
            setChannels(channel);
            break;
        case AUDIO_TYPE_WAVE:
            if (!utils::header_parsing(mFp, audioType, &channel, &sampleRate, &pcmFormat)) {
                medvdbg("header parsing failed\n");
                return false;
            }
            if (fseek(mFp, WAVE_HEADER_LENGTH, SEEK_SET) != 0) {
                medvdbg("file seek failed error\n");
                return false;
            }
            setSampleRate(sampleRate);
            setChannels(channel);
            setPcmFormat(pcmFormat);
            break;
        case AUDIO_TYPE_FLAC:
            /* To be supported */
            break;
        default:
            /* Don't set any decoder for unsupported formats */
            break;
        }
        return true;
    }

    /* mFp is already non-null: the file is already open and in use, so report success. */
    return true;
}
void GladiatorIMU::handleCommand(gladiator_cmd_t* cmd)
{
    uint8_t* payload;
    switch (cmd->command) {
    case CMD_LOADK:
        printf("GladiatorIMU: Loading filter K: %f\n", *((float*)&cmd->payload));
        loadFilterK(*((float*)&cmd->payload));
        break;
    case CMD_SETRATE:
        printf("GladiatorIMU: Setting rate %dHz\n", cmd->payload);
        switch (cmd->payload) {
        case 100:
            setSampleRate(MODE_IMU_100HZ);
            break;
        case 200:
            setSampleRate(MODE_IMU_200HZ);
            break;
        case 500:
            setSampleRate(MODE_IMU_500HZ);
            break;
        case 1000:
            setSampleRate(MODE_IMU_1000HZ);
            break;
        }
        break;
    case CMD_GETBOARD:
        printf("GladiatorIMU: Returning board number bytes\n");
        cmd->payload = 0;
        payload = (uint8_t*)&(cmd->payload);
        payload[0] = boardNumber[0];
        payload[1] = boardNumber[1];
        payload[2] = boardNumber[2];
        payload[3] = boardNumber[3];
        break;
    case CMD_GETSTATUS:
        printf("GladiatorIMU: Returning status bytes\n");
        cmd->payload = 0;
        payload = (uint8_t*)&(cmd->payload);
        payload[0] = statusByte[0];
        payload[1] = statusByte[1];
        payload[2] = filterNumber;
        payload[3] = 0;
        break;
    default:
        printf("GladiatorIMU: Unknown command: 0x%X\n", cmd->command);
        break;
    }
    cmd->signal();
}
void AudioJack::applyQualitySettings()
{
    if( hqAudio() )
    {
        setSampleRate( engine::mixer()->processingSampleRate() );

        if( jack_get_sample_rate( m_client ) != sampleRate() )
        {
            setSampleRate( jack_get_sample_rate( m_client ) );
        }
    }

    AudioDevice::applyQualitySettings();
}
GenericTimeDataParams::GenericTimeDataParams(DataUiHandlerProperty *baseProperty, TimePlotParams *params, QObject *parent) :
    DataUiHandlerProperty(baseProperty, parent),
    m_startTime(0),
    m_name(QString("no name")),
    m_curveEnabled(true)
{
    if (params) {
        setSampleRate(params->sampleRate());
        setMaxDuration(params->maxDuration());
    } else {
        setSampleRate(TIMEDATA_DEFAULT_SR);
        setMaxDuration(TIMEDATA_DEFAULT_PROJECT_TIME);
    }
}
Result SoundSourceSndFile::open() {
#ifdef __WINDOWS__
    // Pointer valid until string changed
    LPCWSTR lpcwFilename = (LPCWSTR)getFilename().utf16();
    fh = sf_wchar_open(lpcwFilename, SFM_READ, &info);
#else
    const QByteArray qbaFilename(getFilename().toLocal8Bit());
    fh = sf_open(qbaFilename.constData(), SFM_READ, &info);
#endif

    if (fh == NULL) {
        // sf_format_check is only for writes
        qWarning() << "libsndfile: Error opening file" << getFilename() << sf_strerror(fh);
        return ERR;
    }

    if (sf_error(fh) > 0) {
        qWarning() << "libsndfile: Error opening file" << getFilename() << sf_strerror(fh);
        return ERR;
    }

    channels = info.channels;
    setSampleRate(info.samplerate);

    // This is the 'virtual' filelength. No matter how many channels the file
    // actually has, we pretend it has 2.
    filelength = info.frames * 2; // File length with two interleaved channels

    return OK;
}
AudioFileDevice::AudioFileDevice( OutputSettings const & outputSettings,
                                  const ch_cnt_t _channels,
                                  const QString & _file,
                                  Mixer* _mixer ) :
    AudioDevice( _channels, _mixer ),
    m_outputFile( _file ),
    m_outputSettings( outputSettings )
{
    setSampleRate( outputSettings.getSampleRate() );

    if( m_outputFile.open( QFile::WriteOnly | QFile::Truncate ) == false )
    {
        QString title, message;
        title = ExportProjectDialog::tr( "Could not open file" );
        message = ExportProjectDialog::tr( "Could not open file %1 "
                                           "for writing.\nPlease make "
                                           "sure you have write "
                                           "permission to the file and "
                                           "the directory containing the "
                                           "file and try again!" ).arg( _file );

        if( gui )
        {
            QMessageBox::critical( NULL, title, message,
                                   QMessageBox::Ok, QMessageBox::NoButton );
        }
        else
        {
            fprintf( stderr, "%s\n", message.toUtf8().constData() );
            exit( EXIT_FAILURE );
        }
    }
}
void MMA7660::init()
{
    initAccelTable();
    setMode(MMA7660_STAND_BY);
    setSampleRate(AUTO_SLEEP_32);
    setMode(MMA7660_ACTIVE);
}
MainWindow::MainWindow()
{
    setWindowTitle(tr("inspectrum"));

    dock = new SpectrogramControls(tr("Controls"), this);
    dock->setAllowedAreas(Qt::LeftDockWidgetArea | Qt::RightDockWidgetArea);
    addDockWidget(Qt::LeftDockWidgetArea, dock);

    input = new InputSource();
    plots = new PlotView(input);
    setCentralWidget(plots);

    // Connect dock inputs
    connect(dock, SIGNAL(openFile(QString)), this, SLOT(openFile(QString)));
    connect(dock->sampleRate, SIGNAL(textChanged(QString)), this, SLOT(setSampleRate(QString)));
    connect(dock, SIGNAL(fftOrZoomChanged(int, int)), plots, SLOT(setFFTAndZoom(int, int)));
    connect(dock->powerMaxSlider, SIGNAL(valueChanged(int)), plots, SLOT(setPowerMax(int)));
    connect(dock->powerMinSlider, SIGNAL(valueChanged(int)), plots, SLOT(setPowerMin(int)));
    connect(dock->cursorsCheckBox, &QCheckBox::stateChanged, plots, &PlotView::enableCursors);
    connect(dock->cursorBitsSpinBox, static_cast<void (QSpinBox::*)(int)>(&QSpinBox::valueChanged), plots, &PlotView::setCursorBits);

    // Connect dock outputs
    connect(plots, SIGNAL(timeSelectionChanged(float)), dock, SLOT(timeSelectionChanged(float)));
    connect(plots, SIGNAL(zoomIn()), dock, SLOT(zoomIn()));
    connect(plots, SIGNAL(zoomOut()), dock, SLOT(zoomOut()));

    // Set defaults after making connections so everything is in sync
    dock->setDefaults();
}
void Device::updateFormat( const Format &format )
{
    size_t sampleRate = format.getSampleRate();
    size_t framesPerBlock = format.getFramesPerBlock();
    if( mSampleRate == sampleRate && mFramesPerBlock == framesPerBlock )
        return;

    auto deviceMgr = Context::deviceManager();
    mSignalParamsWillChange.emit();

    if( sampleRate && sampleRate != mSampleRate ) {
        // set the samplerate to 0, forcing it to refresh on next get.
        mSampleRate = 0;
        deviceMgr->setSampleRate( shared_from_this(), sampleRate );
    }
    if( framesPerBlock && framesPerBlock != mFramesPerBlock ) {
        // set the frames per block to 0, forcing it to refresh on next get
        mFramesPerBlock = 0;
        deviceMgr->setFramesPerBlock( shared_from_this(), framesPerBlock );
    }

    if( ! deviceMgr->isFormatUpdatedAsync() )
        mSignalParamsDidChange.emit();
}
void AudioPortAudio::applyQualitySettings()
{
    if( hqAudio() )
    {
        setSampleRate( Engine::mixer()->processingSampleRate() );
        int samples = mixer()->framesPerPeriod();

        PaError err = Pa_OpenStream(
                &m_paStream,
                supportsCapture() ? &m_inputParameters : NULL,  // The input parameter
                &m_outputParameters,                            // The output parameter
                sampleRate(),
                samples,
                paNoFlag,           // Don't use any flags
                _process_callback,  // our callback function
                this );

        if( err != paNoError )
        {
            printf( "Couldn't open PortAudio: %s\n", Pa_GetErrorText( err ) );
            return;
        }
    }

    AudioDevice::applyQualitySettings();
}
void TrackInfoObject::parse() {
    // Log parsing of header information in developer mode. This is useful for
    // tracking down corrupt files.
    const QString& canonicalLocation = m_fileInfo.canonicalFilePath();
    if (CmdlineArgs::Instance().getDeveloper()) {
        qDebug() << "TrackInfoObject::parse()" << canonicalLocation;
    }

    // Parse the information stored in the sound file.
    SoundSourceProxy proxy(canonicalLocation, m_pSecurityToken);
    Mixxx::SoundSource* pProxiedSoundSource = proxy.getProxiedSoundSource();
    if (pProxiedSoundSource != NULL && proxy.parseHeader() == OK) {
        // Dump the metadata extracted from the file into the track.

        // TODO(XXX): This involves locking the mutex for every setXXX
        // method. We should figure out an optimization where there are private
        // setters that don't lock the mutex.

        // If Artist, Title and Type fields are not blank, modify them.
        // Otherwise, keep their current values.
        // TODO(rryan): Should we re-visit this decision?
        if (!(pProxiedSoundSource->getArtist().isEmpty())) {
            setArtist(pProxiedSoundSource->getArtist());
        }

        if (!(pProxiedSoundSource->getTitle().isEmpty())) {
            setTitle(pProxiedSoundSource->getTitle());
        }

        if (!(pProxiedSoundSource->getType().isEmpty())) {
            setType(pProxiedSoundSource->getType());
        }

        setAlbum(pProxiedSoundSource->getAlbum());
        setAlbumArtist(pProxiedSoundSource->getAlbumArtist());
        setYear(pProxiedSoundSource->getYear());
        setGenre(pProxiedSoundSource->getGenre());
        setComposer(pProxiedSoundSource->getComposer());
        setGrouping(pProxiedSoundSource->getGrouping());
        setComment(pProxiedSoundSource->getComment());
        setTrackNumber(pProxiedSoundSource->getTrackNumber());
        setReplayGain(pProxiedSoundSource->getReplayGain());
        setBpm(pProxiedSoundSource->getBPM());
        setDuration(pProxiedSoundSource->getDuration());
        setBitrate(pProxiedSoundSource->getBitrate());
        setSampleRate(pProxiedSoundSource->getSampleRate());
        setChannels(pProxiedSoundSource->getChannels());
        setKeyText(pProxiedSoundSource->getKey(),
                   mixxx::track::io::key::FILE_METADATA);
        setHeaderParsed(true);
    } else {
        qDebug() << "TrackInfoObject::parse() error at file" << canonicalLocation;
        setHeaderParsed(false);

        // Add basic information derived from the filename:
        parseFilename();
    }
}
OsStatus MpOss::attachReader()
{
    OsStatus ret = OS_FAILED;
    UtlBoolean threadSleep = (mStReader == FALSE) && (mStWriter == FALSE);

    if (!((mReader == NULL) || (mStReader == TRUE)))
    {
        assert(mbReadCap == TRUE);
        ret = setSampleRate(mReader->mSamplesPerSec, mReader->mSamplesPerFrame);
        if (ret == OS_SUCCESS)
        {
            mStReader = TRUE;

            if (threadSleep)
            {
                threadWakeUp();
            }
            else
            {
                threadIoStatusChanged();
            }
        }
    }
    return ret;
}
LadderFilter<Type>::LadderFilter()  : state (2)
{
    setSampleRate (Type (1000));  // intentionally setting unrealistic default
                                  // sample rate to catch missing initialisation bugs
    setResonance (Type (0));
    setDrive (Type (1.2));
    setMode (Mode::LPF12);
}
void MMA7660::init(uint8_t interrupts)
{
    initAccelTable();
    setMode(MMA7660_STAND_BY);
    setSampleRate(AUTO_SLEEP_32);
    write(MMA7660_INTSU, interrupts);
    setMode(MMA7660_ACTIVE);
}
SoundSource::OpenResult SoundSourceWV::tryOpen(
        OpenMode /*mode*/,
        const OpenParams& params) {
    DEBUG_ASSERT(!m_wpc);
    char msg[80]; // hold possible error message
    int openFlags = OPEN_WVC | OPEN_NORMALIZE;
    if ((params.channelCount() == 1) ||
            (params.channelCount() == 2)) {
        openFlags |= OPEN_2CH_MAX;
    }

    // We use WavpackOpenFileInputEx to support Unicode paths on windows
    // http://www.wavpack.com/lib_use.txt
    QString wavPackFileName = getLocalFileName();
    m_pWVFile = new QFile(wavPackFileName);
    m_pWVFile->open(QFile::ReadOnly);
    QString correctionFileName(wavPackFileName + "c");
    if (QFile::exists(correctionFileName)) {
        // If there is a correction file, open it as well
        m_pWVCFile = new QFile(correctionFileName);
        m_pWVCFile->open(QFile::ReadOnly);
    }
    m_wpc = WavpackOpenFileInputEx(&s_streamReader, m_pWVFile, m_pWVCFile,
            msg, openFlags, 0);
    if (!m_wpc) {
        kLogger.warning() << "failed to open file : " << msg;
        return OpenResult::Failed;
    }

    setChannelCount(WavpackGetReducedChannels(m_wpc));
    setSampleRate(WavpackGetSampleRate(m_wpc));
    initFrameIndexRangeOnce(
            mixxx::IndexRange::forward(
                    0,
                    WavpackGetNumSamples(m_wpc)));

    if (WavpackGetMode(m_wpc) & MODE_FLOAT) {
        m_sampleScaleFactor = CSAMPLE_PEAK;
    } else {
        const int bitsPerSample = WavpackGetBitsPerSample(m_wpc);
        if ((bitsPerSample >= 8) && (bitsPerSample <= 32)) {
            // Range of signed sample values: [-2 ^ (bitsPerSample - 1), 2 ^ (bitsPerSample - 1) - 1]
            const uint32_t absSamplePeak = 1u << (bitsPerSample - 1);
            DEBUG_ASSERT(absSamplePeak > 0);
            // Scaled range of sample values: [-CSAMPLE_PEAK, CSAMPLE_PEAK)
            m_sampleScaleFactor = CSAMPLE_PEAK / absSamplePeak;
        } else {
            kLogger.warning() << "Invalid bits per sample:" << bitsPerSample;
            return OpenResult::Aborted;
        }
    }

    m_curFrameIndex = frameIndexMin();

    return OpenResult::Succeeded;
}
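The scale-factor arithmetic above (absolute peak = 2^(bitsPerSample - 1), factor = CSAMPLE_PEAK / peak) can be checked in isolation. Below is a minimal standalone sketch, using a hypothetical kSamplePeak of 1.0f in place of Mixxx's CSAMPLE_PEAK constant; it is an illustration, not code from the project.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for Mixxx's CSAMPLE_PEAK (full-scale float sample).
static const float kSamplePeak = 1.0f;

int main() {
    // For each bit depth accepted above, the largest magnitude of a signed
    // integer sample is 2^(bits - 1); dividing by it maps raw samples into
    // the float range [-kSamplePeak, kSamplePeak).
    for (int bitsPerSample : {8, 16, 24, 32}) {
        const uint32_t absSamplePeak = 1u << (bitsPerSample - 1);
        const float scaleFactor = kSamplePeak / absSamplePeak;
        std::printf("%2d bits: peak %10u, scale %.10g\n",
                    bitsPerSample, (unsigned)absSamplePeak, scaleFactor);
    }
    return 0;
}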
SpectralCentroid::SpectralCentroid(int history_size, int spectrum_size, int sample_rate) : Feature(history_size)
{
    initialized = false;
    barkWeights = NULL;
    barkUnits = NULL;
    setSpectrumSize(spectrum_size);
    setSampleRate(sample_rate);
}
GenericTimeDataParams::GenericTimeDataParams(QObject *object) :
    DataUiHandlerProperty(object),
    m_startTime(0),
    m_name(QString("no name")),
    m_curveEnabled(true)
{
    setSampleRate(TIMEDATA_DEFAULT_SR);
    setMaxDuration(TIMEDATA_DEFAULT_PROJECT_TIME);
}
// Constructor
Phaser::Phaser() :
    currentSampleRate(INIT_SAMPLE_RATE),
    zm1( 0.f )
{
    // set params and sample rate
    setParameters(Parameters());
    setSampleRate(INIT_SAMPLE_RATE);

    // set depth range
    range(440.f, 1600.f);
}
Result SoundSourceOggVorbis::open() {
    const QByteArray qBAFilename(getFilename().toLocal8Bit());
#ifdef __WINDOWS__
    if (ov_fopen(qBAFilename.constData(), &vf) < 0) {
        qDebug() << "oggvorbis: Input does not appear to be an Ogg bitstream.";
        filelength = 0;
        return ERR;
    }
#else
    FILE *vorbisfile = fopen(qBAFilename.constData(), "r");

    if (!vorbisfile) {
        qDebug() << "oggvorbis: cannot open" << getFilename();
        return ERR;
    }

    if (ov_open(vorbisfile, &vf, NULL, 0) < 0) {
        qDebug() << "oggvorbis: Input does not appear to be an Ogg bitstream.";
        filelength = 0;
        return ERR;
    }
#endif

    // lookup the ogg's channels and samplerate
    vorbis_info* vi = ov_info(&vf, -1);

    channels = vi->channels;
    setSampleRate(vi->rate);

    if (channels > 2) {
        qDebug() << "oggvorbis: No support for more than 2 channels!";
        ov_clear(&vf);
        filelength = 0;
        return ERR;
    }

    // ov_pcm_total returns the total number of frames in the ogg file. The
    // frame is the channel-independent measure of samples. The total samples in
    // the file is channels * ov_pcm_total. rryan 7/2009 I verified this by
    // hand. a 30 second long 48khz mono ogg and a 48khz stereo ogg both report
    // 1440000 for ov_pcm_total.
    ogg_int64_t ret = ov_pcm_total(&vf, -1);

    if (ret >= 0) {
        // We pretend that the file is stereo to the rest of the world.
        filelength = ret * 2;
    } else { // error
        if (ret == OV_EINVAL) {
            // The file is not seekable. Not sure if any action is needed.
            qDebug() << "oggvorbis: file is not seekable " << getFilename();
        }
    }

    return OK;
}
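Both this decoder and the libsndfile one above report length as frames * 2, i.e. as if every file were stereo, because the callers expect an interleaved two-channel sample count. A minimal sketch of that frame/"virtual sample" conversion follows; the helper names are illustrative, not taken from either codebase.

#include <cstdio>

// The engine-facing "virtual" length always assumes two interleaved
// channels, regardless of the file's real channel count.
constexpr long kVirtualChannels = 2;

constexpr long framesToVirtualSamples(long frames) {
    return frames * kVirtualChannels;
}

constexpr long virtualSamplesToFrames(long samples) {
    return samples / kVirtualChannels;
}

int main() {
    // A 30 second file at 48 kHz has 1440000 frames (the figure quoted in
    // the comment above); the reported length is stereo either way.
    const long frames = 30L * 48000L;
    std::printf("frames=%ld, virtual samples=%ld, back to frames=%ld\n",
                frames, framesToVirtualSamples(frames),
                virtualSamplesToFrames(framesToVirtualSamples(frames)));
    return 0;
}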
bool AREngine::initHardware(IOService* inProvider)
{
    bool theAnswer = false;

    if(IOAudioEngine::initHardware(inProvider))
    {
        IOAudioSampleRate theInitialSampleRate = { 0, 0 };
        UInt32 theNumberChannels = 0;

        // create the streams
        if(CreateStreams(&theInitialSampleRate, &theNumberChannels) && (theInitialSampleRate.whole != 0))
        {
            CreateControls(theNumberChannels);

            // figure out how long each block is in microseconds
            mBlockTimeoutMicroseconds = 1000000 * mBlockSize / theInitialSampleRate.whole;

            setSampleRate(&theInitialSampleRate);

            // Set the number of sample frames in each buffer
            setNumSampleFramesPerBuffer(mBlockSize * mNumberBlocks);

            // set up the timer
            IOWorkLoop* theWorkLoop = getWorkLoop();
            if(theWorkLoop != NULL)
            {
                mTimerEventSource = IOTimerEventSource::timerEventSource(this, TimerFired);
                if(mTimerEventSource != NULL)
                {
                    theWorkLoop->addEventSource(mTimerEventSource);
                    theAnswer = true;
                }
            }

            // set the safety offset
            // note that due to cache issues, it probably isn't wise to leave the safety offset at 0,
            // we set it to 4 here, just to be safe.
            setSampleOffset(4);

            // set up the time stamp generator
            mTimeStampGenerator.SetSampleRate(theInitialSampleRate.whole);
            mTimeStampGenerator.SetFramesPerRingBuffer(mBlockSize * mNumberBlocks);

            // note that the rate scalar is a 4.28 fixed point number
            // this means that each increment is 1/2^28
            mTimeStampGenerator.SetRateScalar(1UL << 28);

            // set the maximum jitter
            // AbsoluteTime theMaximumJitter = { 0, 0 };
            // nanoseconds_to_absolutetime(5ULL * 1000ULL, &theMaximumJitter);
            // mTimeStampGenerator.SetMaximumJitter(theMaximumJitter.lo);
        }
    }

    return theAnswer;
}
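The two numeric conventions in this init path, the per-block timeout in microseconds and the 4.28 fixed-point rate scalar, are easy to sanity-check on their own. Below is a minimal sketch with an assumed block size and sample rate (not values taken from the driver).

#include <cstdint>
#include <cstdio>

int main() {
    // Per-block timeout, mirroring mBlockTimeoutMicroseconds above:
    // 1,000,000 us * blockSize / sampleRate.
    const uint32_t blockSize = 512;      // assumed, not from the driver
    const uint32_t sampleRate = 44100;   // assumed, not from the driver
    const uint32_t blockTimeoutUs = 1000000u * blockSize / sampleRate;
    std::printf("block timeout: %u us\n", (unsigned)blockTimeoutUs);

    // 4.28 fixed point: the low 28 bits are fractional, so 1.0 is
    // represented as 1 << 28 and the smallest increment is 1 / 2^28.
    const uint32_t rateScalarOne = 1UL << 28;
    std::printf("rate scalar 1.0 = 0x%08X, increment = %.12g\n",
                (unsigned)rateScalarOne, 1.0 / rateScalarOne);
    return 0;
}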
void AudioThread::run() {
#ifdef __APPLE__
    pthread_t tID = pthread_self();  // ID of this thread
    int priority = sched_get_priority_max(SCHED_RR) - 1;
    sched_param prio = { priority }; // scheduling priority of thread
    pthread_setschedparam(tID, SCHED_RR, &prio);
#endif

    std::cout << "Audio thread initializing.." << std::endl;

    if (dac.getDeviceCount() < 1) {
        std::cout << "No audio devices found!" << std::endl;
        return;
    }

    setupDevice((outputDevice.load() == -1) ? (dac.getDefaultOutputDevice()) : outputDevice.load());

    std::cout << "Audio thread started." << std::endl;

    inputQueue = (AudioThreadInputQueue *)getInputQueue("AudioDataInput");
    threadQueueNotify = (DemodulatorThreadCommandQueue*)getOutputQueue("NotifyQueue");

    while (!terminated) {
        AudioThreadCommand command;
        cmdQueue.pop(command);

        if (command.cmd == AudioThreadCommand::AUDIO_THREAD_CMD_SET_DEVICE) {
            setupDevice(command.int_value);
        }
        if (command.cmd == AudioThreadCommand::AUDIO_THREAD_CMD_SET_SAMPLE_RATE) {
            setSampleRate(command.int_value);
        }
    }

    if (deviceController[parameters.deviceId] != this) {
        deviceController[parameters.deviceId]->removeThread(this);
    } else {
        try {
            if (dac.isStreamOpen()) {
                if (dac.isStreamRunning()) {
                    dac.stopStream();
                }
                dac.closeStream();
            }
        } catch (RtAudioError& e) {
            e.printMessage();
        }
    }

    if (threadQueueNotify != NULL) {
        DemodulatorThreadCommand tCmd(DemodulatorThreadCommand::DEMOD_THREAD_CMD_AUDIO_TERMINATED);
        tCmd.context = this;
        threadQueueNotify->push(tCmd);
    }

    std::cout << "Audio thread done." << std::endl;
}
void DecoderBinaural::setPinnaSize(PinnaSize pinnaSize) {
    if(m_pinna_size != pinnaSize || getState() == 0) {
        m_pinna_size = pinnaSize;
        unsigned int sample_rate = m_sample_rate;
        m_sample_rate = 0;
        setSampleRate(sample_rate);
    }
}
UGen & UGen::patch( UGenInput & connectToInput )
{
    connectToInput.setIncomingUGen( this );
    mNumOutputs += 1;
    setSampleRate( connectToInput.getOuterUGen().sampleRate() );
    return connectToInput.getOuterUGen();
}
bool eqMac2DriverEngine::initHardware(IOService *provider)
{
    bool result = false;
    IOAudioSampleRate initialSampleRate;
    IOWorkLoop *wl;

    //IOLog("eqMac2DriverEngine[%p]::initHardware(%p)\n", this, provider);

    duringHardwareInit = TRUE;

    if (!super::initHardware(provider)) {
        goto Done;
    }

    initialSampleRate.whole = 0;
    initialSampleRate.fraction = 0;

    if (!createAudioStreams(&initialSampleRate)) {
        IOLog("eqMac2DriverEngine::initHardware() failed\n");
        goto Done;
    }

    if (initialSampleRate.whole == 0) {
        goto Done;
    }

    // calculate our timeout in nanosecs, taking care to keep 64bits
    blockTimeoutNS = blockSize;
    blockTimeoutNS *= 1000000000;
    blockTimeoutNS /= initialSampleRate.whole;

    setSampleRate(&initialSampleRate);

    // Set the number of sample frames in each buffer
    setNumSampleFramesPerBuffer(blockSize * numBlocks);

    wl = getWorkLoop();
    if (!wl) {
        goto Done;
    }

    timerEventSource = IOTimerEventSource::timerEventSource(this, ourTimerFired);

    if (!timerEventSource) {
        goto Done;
    }

    workLoop->addEventSource(timerEventSource);

    result = true;

Done:
    duringHardwareInit = FALSE;
    return result;
}
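The nanosecond timeout above multiplies before dividing so the intermediate value stays in 64 bits and nothing is lost to integer truncation. A minimal sketch of the same ordering, with an assumed block size and sample rate rather than the driver's values:

#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t blockSize = 8192;    // assumed, not the driver's value
    const uint32_t sampleRate = 44100;  // assumed, not the driver's value

    // Widen to 64 bits first, multiply by 1e9, then divide by the rate.
    // Dividing first would truncate; multiplying in 32 bits would overflow.
    uint64_t blockTimeoutNS = blockSize;
    blockTimeoutNS *= 1000000000ULL;
    blockTimeoutNS /= sampleRate;

    std::printf("block timeout: %llu ns (~%.3f ms)\n",
                (unsigned long long)blockTimeoutNS,
                (double)blockTimeoutNS / 1.0e6);
    return 0;
}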
MonoDelay::MonoDelay( int maxTime, int sampleRate )
{
    m_buffer = 0;
    m_maxTime = maxTime;
    m_maxLength = maxTime * sampleRate;
    m_length = m_maxLength;
    m_index = 0;
    m_feedback = 0.0f;
    setSampleRate( sampleRate );
}
void ReSID::loadFromBuffer(uint8_t **buffer)
{
    debug(2, " Loading ReSID state...\n");

    setChipModel((chip_model)read8(buffer));
    setAudioFilter((bool)read8(buffer));
    setSamplingMethod((sampling_method)read8(buffer));
    setSampleRate(read32(buffer));
    setClockFrequency(read32(buffer));
}
void AudioSndio::applyQualitySettings( void )
{
    if( hqAudio() )
    {
        setSampleRate( Engine::mixer()->processingSampleRate() );
        /* change sample rate to sampleRate() */
    }

    AudioDevice::applyQualitySettings();
}
void CubicSDR::setDevice(SDRDeviceInfo *dev) {
    if (!sdrThread->isTerminated()) {
        sdrThread->terminate();
        if (t_SDR) {
            t_SDR->join();
            delete t_SDR;
        }
    }

    for (SoapySDR::Kwargs::const_iterator i = settingArgs.begin(); i != settingArgs.end(); i++) {
        sdrThread->writeSetting(i->first, i->second);
    }
    sdrThread->setStreamArgs(streamArgs);
    sdrThread->setDevice(dev);

    DeviceConfig *devConfig = config.getDevice(dev->getDeviceId());

    SoapySDR::Device *soapyDev = dev->getSoapyDevice();

    if (soapyDev) {
        if (long devSampleRate = devConfig->getSampleRate()) {
            sampleRate = dev->getSampleRateNear(SOAPY_SDR_RX, 0, devSampleRate);
            sampleRateInitialized.store(true);
        }

        if (!sampleRateInitialized.load()) {
            sampleRate = dev->getSampleRateNear(SOAPY_SDR_RX, 0, DEFAULT_SAMPLE_RATE);
            sampleRateInitialized.store(true);
        } else {
            sampleRate = dev->getSampleRateNear(SOAPY_SDR_RX, 0, sampleRate);
        }

        if (frequency < sampleRate / 2) {
            frequency = sampleRate / 2;
        }

        setFrequency(frequency);
        setSampleRate(sampleRate);

        setPPM(devConfig->getPPM());
        setOffset(devConfig->getOffset());

        if (devConfig->getAGCMode()) {
            setAGCMode(true);
        } else {
            setAGCMode(false);
        }

        t_SDR = new std::thread(&SDRThread::threadMain, sdrThread);
    }

    stoppedDev = nullptr;
}
UGen & UGen::patch( UGen & connectToUGen )
{
    connectToUGen.addInput( this );
    // TODO jam3: nOutputs should only increase when this chain will be ticked!
    mNumOutputs += 1;
    setSampleRate( connectToUGen.sampleRate() );
    return connectToUGen;
}