// Constructs an output node bound to a hardware device. Throws AudioDeviceExc
// when the supplied DeviceRef is empty, subscribes to the device's
// parameter-change signals and the manager's interruption signals, and
// resolves the node's channel count against what the hardware offers.
OutputDeviceNode::OutputDeviceNode( const DeviceRef &device, const Format &format )
	: OutputNode( format ), mDevice( device )
{
	if( ! mDevice ) {
		string msg = "Empty DeviceRef.";
		if( ! audio::Device::getDefaultOutput() )
			msg.append( " Also, no default output Device so perhaps there is no available hardware output." );
		throw AudioDeviceExc( msg );
	}

	// listen to the notifications sent by device property changes in order to update the audio graph.
	mWillChangeConn = mDevice->getSignalParamsWillChange().connect( bind( &OutputDeviceNode::deviceParamsWillChange, this ) );
	mDidChangeConn = mDevice->getSignalParamsDidChange().connect( bind( &OutputDeviceNode::deviceParamsDidChange, this ) );

	// pause / resume output across system audio interruptions.
	mInterruptionBeganConn = Context::deviceManager()->getSignalInterruptionBegan().connect( [this] { disable(); } );
	mInterruptionEndedConn = Context::deviceManager()->getSignalInterruptionEnded().connect( [this] { enable(); } );

	const size_t hardwareChannels = mDevice->getNumOutputChannels();

	// If number of channels hasn't been specified, default to 2 (or 1 if that is all that is available).
	if( getChannelMode() != ChannelMode::SPECIFIED ) {
		setChannelMode( ChannelMode::SPECIFIED );
		setNumChannels( hardwareChannels < 2 ? hardwareChannels : 2 );
	}

	// Double check the device has enough channels to support what was requested, which may not be the case if the user asked for more than what is available.
	if( hardwareChannels < getNumChannels() )
		throw AudioFormatExc( string( "Device can not accommodate " ) + to_string( hardwareChannels ) + " output channels." );
}
// Builds an image with the given channel count and pixel dimensions.
Image::Image(int num_channels, int width, int height) {
  init();  // establish default state before applying the requested settings
  setWidth(width);
  setHeight(height);
  setNumChannels(num_channels);
}
// Builds an image with the given channel count, pixel dimensions, and
// classification tag.
// Delegates to the three-argument constructor (C++11 delegating ctor) so the
// common init/width/height/channel setup is defined in exactly one place,
// then applies the classification on top. Behavior is identical to the
// previous duplicated body.
Image::Image(int num_channels, int width, int height, int classification)
    : Image(num_channels, width, height) {
  setClassification(classification);
}
void poImage::load(const std::string &url, uint c) { FIBITMAP *bmp = loadDIB(url); if(bmp) { bitmap = bmp; this->url = url; setNumChannels(c); totalAllocatedImageMemorySize += FreeImage_GetDIBSize(bitmap); } }
//============================================================================== AudioVisualiserComponent::AudioVisualiserComponent (const int initialNumChannels) : numSamples (1024), inputSamplesPerBlock (256), backgroundColour (Colours::black), waveformColour (Colours::white) { setOpaque (true); setNumChannels (initialNumChannels); setRepaintRate (60); }
/* Scans for supported logic analyzers, selects the first one found, and
 * populates the UI (channel count, sample-rate list, sample-count presets)
 * for that device. Shows progress/errors in the status bar. */
void MainWindow::on_actionScan_triggered()
{
	int ret, i;
	QString s;
	struct logic_analyzer la;

	statusBar()->showMessage(tr("Scanning for logic analyzers..."));

	/* On success 'ret' is an index into flosslogic_logic_analyzers[]. */
	ret = flosslogic_scan_for_devices(&ctx);
	if (ret < 0) {
		s = tr("No supported logic analyzer found.");
		statusBar()->showMessage(s);
		return;
	} else {
		s = tr("Found supported logic analyzer: ");
		s.append(flosslogic_logic_analyzers[ret].shortname);
		statusBar()->showMessage(s);
	}

	setCurrentLA(ret);
	setNumChannels(flosslogic_logic_analyzers[ret].numchannels);

	ui->comboBoxLA->clear();
	ui->comboBoxLA->addItem(flosslogic_logic_analyzers[ret].shortname);
	ui->labelChannels->setText(s.sprintf("Channels: %d",
			flosslogic_logic_analyzers[ret].numchannels));

	/* Fill the sample-rate combo box from the device's NULL-terminated
	 * samplerate table. */
	i = 0;
	la = flosslogic_logic_analyzers[getCurrentLA()];
	while (la.samplerates[i].string != NULL) {
		ui->comboBoxSampleRate->addItem(la.samplerates[i].string,
						la.samplerates[i].samplerate);
		i++;
	}

	/* FIXME */
	ui->comboBoxNumSamples->addItem("3000000", 3000000);
	ui->comboBoxNumSamples->addItem("2000000", 2000000);
	ui->comboBoxNumSamples->addItem("1000000", 1000000);
	ui->comboBoxNumSamples->setEditable(true);

	/* NOTE(review): init failure only shows a message; the UI below is
	 * still enabled afterwards — presumably intentional, verify. */
	ret = flosslogic_hw_init(getCurrentLA(), &ctx);
	if (ret < 0)
		statusBar()->showMessage(tr("ERROR: LA init failed."));

	if (getCurrentLA() >= 0)
		setupDockWidgets();

	/* Enable all relevant fields now (i.e. make them non-gray). */
	ui->comboBoxSampleRate->setEnabled(true);
	ui->comboBoxNumSamples->setEnabled(true);
	ui->labelChannels->setEnabled(true);
	ui->action_Get_samples->setEnabled(true);
}
/* Opens an AIFF sample source for reading or writing.
 * With libaudiofile available the file is handled through the audiofile API;
 * otherwise AIFF support is unavailable, an internal error is logged, and the
 * NULL file handle makes this function return false. Returns true on success. */
static boolByte _openSampleSourceAiff(void *sampleSourcePtr, const SampleSourceOpenAs openAs) {
  SampleSource sampleSource = (SampleSource)sampleSourcePtr;
  /* The extra-data layout depends on how the binary was built. */
#if HAVE_LIBAUDIOFILE
  SampleSourceAudiofileData extraData = (SampleSourceAudiofileData)(sampleSource->extraData);
#else
  SampleSourcePcmData extraData = (SampleSourcePcmData)(sampleSource->extraData);
#endif

  if(openAs == SAMPLE_SOURCE_OPEN_READ) {
#if HAVE_LIBAUDIOFILE
    extraData->fileHandle = afOpenFile(sampleSource->sourceName->data, "r", NULL);
    if(extraData->fileHandle != NULL) {
      /* Publish the file's channel count and sample rate as the global
       * audio settings. */
      setNumChannels(afGetVirtualChannels(extraData->fileHandle, AF_DEFAULT_TRACK));
      setSampleRate((float)afGetRate(extraData->fileHandle, AF_DEFAULT_TRACK));
    }
#else
    logInternalError("Executable was not built with a library to read AIFF files");
#endif
  }
  else if(openAs == SAMPLE_SOURCE_OPEN_WRITE) {
#if HAVE_LIBAUDIOFILE
    /* Describe the output file: big-endian AIFF using the current global
     * channel count, sample rate, and default bit depth. */
    AFfilesetup outfileSetup = afNewFileSetup();
    afInitFileFormat(outfileSetup, AF_FILE_AIFF);
    afInitByteOrder(outfileSetup, AF_DEFAULT_TRACK, AF_BYTEORDER_BIGENDIAN);
    afInitChannels(outfileSetup, AF_DEFAULT_TRACK, getNumChannels());
    afInitRate(outfileSetup, AF_DEFAULT_TRACK, getSampleRate());
    afInitSampleFormat(outfileSetup, AF_DEFAULT_TRACK, AF_SAMPFMT_TWOSCOMP, DEFAULT_BITRATE);
    extraData->fileHandle = afOpenFile(sampleSource->sourceName->data, "w", outfileSetup);
#else
    logInternalError("Executable was not built with a library to write AIFF files");
#endif
  }
  else {
    logInternalError("Invalid type for openAs in AIFF file");
    return false;
  }

  /* A NULL handle here covers both open failures and missing library support. */
  if(extraData->fileHandle == NULL) {
    logError("AIFF file '%s' could not be opened for '%s'",
      sampleSource->sourceName->data,
      openAs == SAMPLE_SOURCE_OPEN_READ ? "reading" : "writing");
    return false;
  }

  sampleSource->openedAs = openAs;
  return true;
}
/** Downloads all of the current EPICS settings to the electrometer.
  * Typically used after the electrometer is power-cycled. */
asynStatus drvQuadEM::reset()
{
    epicsInt32 intParam;
    epicsFloat64 dblParam;

    /* Re-apply every cached parameter to the hardware, preserving the
     * order in which the driver normally configures the device. */
    getIntegerParam(P_Range, &intParam);
    setRange(intParam);
    getIntegerParam(P_ValuesPerRead, &intParam);
    setValuesPerRead(intParam);
    getDoubleParam(P_AveragingTime, &dblParam);
    setAveragingTime(dblParam);
    getIntegerParam(P_TriggerMode, &intParam);
    setTriggerMode(intParam);
    getIntegerParam(P_NumChannels, &intParam);
    setNumChannels(intParam);
    getIntegerParam(P_BiasState, &intParam);
    setBiasState(intParam);
    getIntegerParam(P_BiasInterlock, &intParam);
    setBiasInterlock(intParam);
    getDoubleParam(P_BiasVoltage, &dblParam);
    setBiasVoltage(dblParam);
    getIntegerParam(P_Resolution, &intParam);
    setResolution(intParam);
    getIntegerParam(P_ReadFormat, &intParam);
    setReadFormat(intParam);
    getDoubleParam(P_IntegrationTime, &dblParam);
    setIntegrationTime(dblParam);
    /* Refresh device status, then restore the acquisition state last. */
    readStatus();
    getIntegerParam(P_Acquire, &intParam);
    setAcquire(intParam);
    return asynSuccess;
}
static boolByte _openSampleSourceWave(void *sampleSourcePtr, const SampleSourceOpenAs openAs) { SampleSource sampleSource = (SampleSource)sampleSourcePtr; SampleSourcePcmData extraData = (SampleSourcePcmData)sampleSource->extraData; if (openAs == SAMPLE_SOURCE_OPEN_READ) { extraData->fileHandle = fopen(sampleSource->sourceName->data, "rb"); if (extraData->fileHandle != NULL) { if (_readWaveFileInfo(sampleSource->sourceName->data, extraData)) { setNumChannels(extraData->numChannels); setSampleRate(extraData->sampleRate); } else { fclose(extraData->fileHandle); extraData->fileHandle = NULL; } } } else if (openAs == SAMPLE_SOURCE_OPEN_WRITE) { extraData->fileHandle = fopen(sampleSource->sourceName->data, "wb"); if (extraData->fileHandle != NULL) { extraData->numChannels = (unsigned short)getNumChannels(); extraData->sampleRate = (unsigned int)getSampleRate(); extraData->bitDepth = getBitDepth(); if (!_writeWaveFileInfo(extraData)) { fclose(extraData->fileHandle); extraData->fileHandle = NULL; } } } else { logInternalError("Invalid type for openAs in WAVE file"); return false; } if (extraData->fileHandle == NULL) { logError("WAVE file '%s' could not be opened for %s", sampleSource->sourceName->data, openAs == SAMPLE_SOURCE_OPEN_READ ? "reading" : "writing"); return false; } sampleSource->openedAs = openAs; return true; }
// Constructs an output node bound to a hardware device (variant without
// interruption handling): asserts on an empty DeviceRef, subscribes to the
// device's parameter-change signals, and resolves the channel count against
// what the hardware actually offers.
OutputDeviceNode::OutputDeviceNode( const DeviceRef &device, const Format &format )
	: OutputNode( format ), mDevice( device )
{
	CI_ASSERT( mDevice );

	// listen to the notifications sent by device property changes in order to update the audio graph.
	mWillChangeConn = mDevice->getSignalParamsWillChange().connect( bind( &OutputDeviceNode::deviceParamsWillChange, this ) );
	mDidChangeConn = mDevice->getSignalParamsDidChange().connect( bind( &OutputDeviceNode::deviceParamsDidChange, this ) );

	const size_t hardwareChannels = mDevice->getNumOutputChannels();

	// If number of channels hasn't been specified, default to 2 (or 1 if that is all that is available).
	if( getChannelMode() != ChannelMode::SPECIFIED ) {
		setChannelMode( ChannelMode::SPECIFIED );
		setNumChannels( hardwareChannels < 2 ? hardwareChannels : 2 );
	}

	// Double check the device has enough channels to support what was requested.
	if( hardwareChannels < getNumChannels() )
		throw AudioFormatExc( string( "Device can not accommodate " ) + to_string( hardwareChannels ) + " output channels." );
}
void MainWindow::on_action_New_triggered() { for (int i = 0; i < NUMCHANNELS; ++i) { if (dockWidgets[i]) { /* TODO: Check if all childs are also killed. */ delete dockWidgets[i]; dockWidgets[i] = NULL; } } ui->comboBoxLA->clear(); ui->comboBoxLA->addItem(tr("No LA detected")); ui->labelChannels->setText(tr("Channels: ")); ui->labelChannels->setEnabled(false); ui->comboBoxSampleRate->clear(); ui->comboBoxSampleRate->setEnabled(false); ui->comboBoxNumSamples->clear(); ui->comboBoxNumSamples->setEnabled(false); ui->labelSampleStart->setText(tr("Start sample: ")); ui->labelSampleStart->setEnabled(false); ui->labelSampleEnd->setText(tr("End sample: ")); ui->labelSampleEnd->setEnabled(false); ui->labelScaleFactor->setText(tr("Scale factor: ")); ui->labelScaleFactor->setEnabled(false); ui->action_Save_as->setEnabled(false); ui->action_Get_samples->setEnabled(false); setNumChannels(0); /* TODO: More cleanups. */ /* TODO: Free sample buffer(s). */ }
// TODO: Checking for DelayNode below is a kludge and will not work for other types that want to support feedback.
//       With more investigation it might be possible to avoid this, or at least define some interface that
//       specifies whether this input needs to be summed.
//
// Decides, for this Node and each of its inputs and outputs, whether processing
// can happen in-place or whether an internal summing buffer is required, then
// (re)initializes everything affected. May recurse into neighbors whose channel
// counts had to be adapted.
void Node::configureConnections()
{
	CI_ASSERT( getContext() );

	// Start optimistic; any of the conditions below can force buffered (summing) processing.
	mProcessInPlace = supportsProcessInPlace();

	if( getNumConnectedInputs() > 1 || getNumConnectedOutputs() > 1 )
		mProcessInPlace = false;

	bool isDelay = ( dynamic_cast<DelayNode *>( this ) != nullptr ); // see note above
	bool inputChannelsUnequal = inputChannelsAreUnequal();

	for( auto &input : mInputs ) {
		bool inputProcessInPlace = true;

		size_t inputNumChannels = input->getNumChannels();
		if( ! supportsInputNumChannels( inputNumChannels ) ) {
			// Channel mismatch: adapt whichever side declares itself flexible,
			// otherwise fall back to summing on this connection.
			if( mChannelMode == ChannelMode::MATCHES_INPUT )
				setNumChannels( getMaxNumInputChannels() );
			else if( input->getChannelMode() == ChannelMode::MATCHES_OUTPUT ) {
				input->setNumChannels( mNumChannels );
				input->configureConnections();
			}
			else {
				mProcessInPlace = false;
				inputProcessInPlace = false;
			}
		}

		// inputs with more than one output cannot process in-place, so make them sum
		if( input->getProcessesInPlace() && input->getNumConnectedOutputs() > 1 )
			inputProcessInPlace = false;

		// when there are multiple inputs and their channel counts don't match, they must be summed
		if( inputChannelsUnequal )
			inputProcessInPlace = false;

		// if we're unable to process in-place and we're a DelayNode, its possible that the input may be part of a feedback loop, in which case input must sum.
		if( ! mProcessInPlace && isDelay )
			inputProcessInPlace = false;

		if( ! inputProcessInPlace )
			input->setupProcessWithSumming();

		input->initializeImpl();
	}

	for( auto &out : mOutputs ) {
		NodeRef output = out.lock();
		if( ! output )
			continue; // output has already been destroyed; skip it
		if( ! output->supportsInputNumChannels( mNumChannels ) ) {
			if( output->getChannelMode() == ChannelMode::MATCHES_INPUT ) {
				output->setNumChannels( mNumChannels );
				output->configureConnections();
			}
			else
				mProcessInPlace = false;
		}
	}

	if( ! mProcessInPlace )
		setupProcessWithSumming();

	initializeImpl();
}
// Constructs a DelayNode with a zero-second initial delay and no allocated
// buffer state. The node is forced to a single, explicitly specified channel.
DelayNode::DelayNode( const Format &format )
	: Node( format ), mParamDelaySeconds( this, 0 ), mWriteIndex( 0 ),
	  mSampleRate( 0 ), mMaxDelaySeconds( 0 )
{
	setNumChannels( 1 );
	setChannelMode( ChannelMode::SPECIFIED );
}
void MainWindow::on_action_Open_triggered() { QString s; QString fileName = QFileDialog::getOpenFileName(this, tr("Open sample file"), ".", tr("Raw sample files (*.raw *.bin);;" "Gnuplot data files (*.dat);;" "VCD files (*.vcd);;" "All files (*)")); if (fileName == NULL) return; QFile file(fileName); file.open(QIODevice::ReadOnly); QDataStream in(&file); /* TODO: Implement support for loading different input formats. */ sample_buffer = (uint8_t *)malloc(file.size()); if (sample_buffer == NULL) { /* TODO: Error handling. */ } in.readRawData((char *)sample_buffer, file.size()); setNumSamples(file.size()); setNumChannels(8); /* FIXME */ file.close(); setupDockWidgets(); ui->comboBoxLA->clear(); ui->comboBoxLA->addItem(tr("File")); /* FIXME: Store number of channels in the file or allow user config. */ s.sprintf("%d", getNumChannels()); s.prepend(tr("Channels: ")); ui->labelChannels->setText(s); ui->labelChannels->setEnabled(false); ui->comboBoxSampleRate->clear(); ui->comboBoxSampleRate->setEnabled(false); /* FIXME */ ui->comboBoxNumSamples->clear(); ui->comboBoxNumSamples->addItem(s.sprintf("%llu", getNumSamples()), getNumSamples()); ui->comboBoxNumSamples->setEnabled(true); ui->labelSampleStart->setText(tr("Start sample: ")); ui->labelSampleStart->setEnabled(true); ui->labelSampleEnd->setText(tr("End sample: ")); ui->labelSampleEnd->setEnabled(true); ui->labelZoomFactor->setText(tr("Zoom factor: ")); ui->labelZoomFactor->setEnabled(true); ui->action_Save_as->setEnabled(true); ui->action_Get_samples->setEnabled(false); for (int i = 0; i < getNumChannels(); ++i) { channelRenderAreas[i]->setChannelNumber(i); channelRenderAreas[i]->setNumSamples(file.size()); channelRenderAreas[i]->setSampleStart(0); channelRenderAreas[i]->setSampleEnd(getNumSamples()); channelRenderAreas[i]->update(); } /* FIXME */ }
/* Parses the RIFF/WAVE headers of the file already opened in
 * extraData->fileHandle, filling in extraData's channel count, sample rate,
 * and bit depth, and leaving the file position at the start of sample data.
 * Also publishes channel count and sample rate to the global audio settings.
 *
 * Only uncompressed PCM (audio format 1), 16-bit files are accepted.
 * Returns true if a valid fmt chunk and data chunk were found, false otherwise.
 *
 * Fix: the "Invalid format chunk header" path previously called
 * logError(filename, ...), which treated the filename as the printf-style
 * format string; it now uses logFileError() like every sibling error path. */
static boolByte _readWaveFileInfo(const char *filename, SampleSourcePcmData extraData) {
  int chunkOffset = 0;
  RiffChunk chunk = newRiffChunk();
  boolByte dataChunkFound = false;
  char format[4];
  size_t itemsRead;
  unsigned int audioFormat;
  unsigned int byteRate;
  unsigned int expectedByteRate;
  unsigned int blockAlign;
  unsigned int expectedBlockAlign;

  if (riffChunkReadNext(chunk, extraData->fileHandle, false)) {
    if (!riffChunkIsIdEqualTo(chunk, "RIFF")) {
      logFileError(filename, "Invalid RIFF chunk descriptor");
      freeRiffChunk(chunk);
      return false;
    }

    // The WAVE file format has two sub-chunks, with the size of both calculated in the size field. Before
    // either of the subchunks, there are an extra 4 bytes which indicate the format type. We need to read
    // that before either of the subchunks can be parsed.
    itemsRead = fread(format, sizeof(byte), 4, extraData->fileHandle);
    if (itemsRead != 4 || strncmp(format, "WAVE", 4)) {
      logFileError(filename, "Invalid format description");
      freeRiffChunk(chunk);
      return false;
    }
  } else {
    logFileError(filename, "No chunks following descriptor");
    freeRiffChunk(chunk);
    return false;
  }

  if (riffChunkReadNext(chunk, extraData->fileHandle, true)) {
    if (!riffChunkIsIdEqualTo(chunk, "fmt ")) {
      logFileError(filename, "Invalid format chunk header");
      freeRiffChunk(chunk);
      return false;
    }

    audioFormat = convertByteArrayToUnsignedShort(chunk->data + chunkOffset);
    chunkOffset += 2;
    if (audioFormat != 1) {
      logError("WAVE file with audio format %d is not supported", audioFormat);
      freeRiffChunk(chunk);
      return false;
    }

    extraData->numChannels = convertByteArrayToUnsignedShort(chunk->data + chunkOffset);
    chunkOffset += 2;
    setNumChannels(extraData->numChannels);

    extraData->sampleRate = convertByteArrayToUnsignedInt(chunk->data + chunkOffset);
    chunkOffset += 4;
    setSampleRate(extraData->sampleRate);

    byteRate = convertByteArrayToUnsignedInt(chunk->data + chunkOffset);
    chunkOffset += 4;

    blockAlign = convertByteArrayToUnsignedShort(chunk->data + chunkOffset);
    chunkOffset += 2;

    extraData->bitDepth = (BitDepth) convertByteArrayToUnsignedShort(chunk->data + chunkOffset);
    if (extraData->bitDepth != kBitDepth16Bit) {
      logUnsupportedFeature("Non-16-bit files with internal WAVE file support (build with audiofile instead!)");
      freeRiffChunk(chunk);
      return false;
    }

    // Sanity-check the derived header fields; mismatches are logged but not fatal.
    expectedByteRate = (unsigned int)(extraData->sampleRate) * extraData->numChannels * extraData->bitDepth / 8;
    if (expectedByteRate != byteRate) {
      logWarn("Possibly invalid bitrate %d, expected %d", byteRate, expectedByteRate);
    }

    expectedBlockAlign = (unsigned int)(extraData->numChannels * extraData->bitDepth / 8);
    if (expectedBlockAlign != blockAlign) {
      logWarn("Possibly invalid block align %d, expected %d", blockAlign, expectedBlockAlign);
    }
  } else {
    logFileError(filename, "WAVE file has no chunks following format");
    freeRiffChunk(chunk);
    return false;
  }

  // We don't need the format data anymore, so free and re-alloc the chunk to avoid a small memory leak
  freeRiffChunk(chunk);
  chunk = newRiffChunk();

  // FFMpeg (and possibly other programs) have extra sections between the fmt and data chunks. They
  // can be safely ignored. We just need to find the data chunk. See also:
  // http://forum.videohelp.com/threads/359689-ffmpeg-Override-Set-ISFT-Metadata
  while (!dataChunkFound) {
    if (riffChunkReadNext(chunk, extraData->fileHandle, false)) {
      if (riffChunkIsIdEqualTo(chunk, "data")) {
        logDebug("WAVE file has %d bytes", chunk->size);
        dataChunkFound = true;
      } else {
        fseek(extraData->fileHandle, (long) chunk->size, SEEK_CUR);
      }
    } else {
      break;
    }
  }

  if (!dataChunkFound) {
    logFileError(filename, "Could not find a data chunk. Possibly malformed WAVE file.");
    freeRiffChunk(chunk);
    return false;
  }

  freeRiffChunk(chunk);
  return true;
}
/* Opens a WAVE sample source for reading or writing.
 * When built with libaudiofile the file is handled through the audiofile API;
 * otherwise a minimal internal PCM reader/writer is used (the internal writer
 * always emits 16-bit samples). Returns true on success, false otherwise. */
static boolByte _openSampleSourceWave(void *sampleSourcePtr, const SampleSourceOpenAs openAs) {
  SampleSource sampleSource = (SampleSource)sampleSourcePtr;
  /* The extra-data layout depends on how the binary was built. */
#if HAVE_LIBAUDIOFILE
  SampleSourceAudiofileData extraData = sampleSource->extraData;
#else
  SampleSourcePcmData extraData = (SampleSourcePcmData)sampleSource->extraData;
#endif

  if(openAs == SAMPLE_SOURCE_OPEN_READ) {
#if HAVE_LIBAUDIOFILE
    extraData->fileHandle = afOpenFile(sampleSource->sourceName->data, "r", NULL);
    if(extraData->fileHandle != NULL) {
      /* Publish the file's channel count and sample rate as the global
       * audio settings. */
      setNumChannels(afGetVirtualChannels(extraData->fileHandle, AF_DEFAULT_TRACK));
      setSampleRate((float)afGetRate(extraData->fileHandle, AF_DEFAULT_TRACK));
    }
#else
    extraData->fileHandle = fopen(sampleSource->sourceName->data, "rb");
    if(extraData->fileHandle != NULL) {
      if(_readWaveFileInfo(sampleSource->sourceName->data, extraData)) {
        setNumChannels(extraData->numChannels);
        setSampleRate(extraData->sampleRate);
      }
      else {
        /* Header parsing failed; close so the check below reports the error. */
        fclose(extraData->fileHandle);
        extraData->fileHandle = NULL;
      }
    }
#endif
  }
  else if(openAs == SAMPLE_SOURCE_OPEN_WRITE) {
#if HAVE_LIBAUDIOFILE
    /* Describe the output: little-endian WAVE using the current global
     * channel count, sample rate, and default bit depth. */
    AFfilesetup outfileSetup = afNewFileSetup();
    afInitFileFormat(outfileSetup, AF_FILE_WAVE);
    afInitByteOrder(outfileSetup, AF_DEFAULT_TRACK, AF_BYTEORDER_LITTLEENDIAN);
    afInitChannels(outfileSetup, AF_DEFAULT_TRACK, getNumChannels());
    afInitRate(outfileSetup, AF_DEFAULT_TRACK, getSampleRate());
    afInitSampleFormat(outfileSetup, AF_DEFAULT_TRACK, AF_SAMPFMT_TWOSCOMP, DEFAULT_BITRATE);
    extraData->fileHandle = afOpenFile(sampleSource->sourceName->data, "w", outfileSetup);
#else
    extraData->fileHandle = fopen(sampleSource->sourceName->data, "wb");
    if(extraData->fileHandle != NULL) {
      /* Snapshot the current global settings into the header we write;
       * this writer always emits 16-bit PCM. */
      extraData->numChannels = (unsigned short)getNumChannels();
      extraData->sampleRate = (unsigned int)getSampleRate();
      extraData->bitsPerSample = 16;
      if(!_writeWaveFileInfo(extraData)) {
        fclose(extraData->fileHandle);
        extraData->fileHandle = NULL;
      }
    }
#endif
  }
  else {
    logInternalError("Invalid type for openAs in WAVE file");
    return false;
  }

  /* A NULL handle here covers open failures on either code path. */
  if(extraData->fileHandle == NULL) {
    logError("WAVE file '%s' could not be opened for %s",
      sampleSource->sourceName->data,
      openAs == SAMPLE_SOURCE_OPEN_READ ? "reading" : "writing");
    return false;
  }

  sampleSource->openedAs = openAs;
  return true;
}
int mrsWatsonMain(ErrorReporter errorReporter, int argc, char** argv) { ReturnCodes result; // Input/Output sources, plugin chain, and other required objects SampleSource inputSource = NULL; SampleSource outputSource = NULL; AudioClock audioClock; PluginChain pluginChain; CharString pluginSearchRoot = newCharString(); boolByte shouldDisplayPluginInfo = false; MidiSequence midiSequence = NULL; MidiSource midiSource = NULL; unsigned long maxTimeInMs = 0; unsigned long maxTimeInFrames = 0; unsigned long tailTimeInMs = 0; unsigned long tailTimeInFrames = 0; unsigned long processingDelayInFrames; ProgramOptions programOptions; ProgramOption option; Plugin headPlugin; SampleBuffer inputSampleBuffer = NULL; SampleBuffer outputSampleBuffer = NULL; TaskTimer initTimer, totalTimer, inputTimer, outputTimer = NULL; LinkedList taskTimerList = NULL; CharString totalTimeString = NULL; boolByte finishedReading = false; SampleSource silentSampleInput; SampleSource silentSampleOutput; unsigned int i; initTimer = newTaskTimerWithCString(PROGRAM_NAME, "Initialization"); totalTimer = newTaskTimerWithCString(PROGRAM_NAME, "Total Time"); taskTimerStart(initTimer); taskTimerStart(totalTimer); initEventLogger(); initAudioSettings(); initAudioClock(); audioClock = getAudioClock(); initPluginChain(); pluginChain = getPluginChain(); programOptions = newMrsWatsonOptions(); inputSource = sampleSourceFactory(NULL); if(!programOptionsParseArgs(programOptions, argc, argv)) { printf("Run with '--help' to see possible options\n"); printf("Or run with '--help full' to see extended help for all options\n"); return RETURN_CODE_INVALID_ARGUMENT; } // These options conflict with standard processing (more or less), so check to see if the user wanted one // of these and then exit right away. 
if(argc == 1) { printf("%s needs at least a plugin, input source, and output source to run.\n\n", PROGRAM_NAME); printMrsWatsonQuickstart(argv[0]); return RETURN_CODE_NOT_RUN; } else if(programOptions->options[OPTION_HELP]->enabled) { printMrsWatsonQuickstart(argv[0]); if(charStringIsEmpty(programOptionsGetString(programOptions, OPTION_HELP))) { printf("All options, where <argument> is required and [argument] is optional:\n"); programOptionsPrintHelp(programOptions, false, DEFAULT_INDENT_SIZE); } else { if(charStringIsEqualToCString(programOptionsGetString(programOptions, OPTION_HELP), "full", true)) { programOptionsPrintHelp(programOptions, true, DEFAULT_INDENT_SIZE); } // Yeah this is a bit silly, but the performance obviously doesn't matter // here and I don't feel like cluttering up this already huge function // with more variables. else if(programOptionsFind(programOptions, programOptionsGetString(programOptions, OPTION_HELP))) { programOptionPrintHelp(programOptionsFind(programOptions, programOptionsGetString(programOptions, OPTION_HELP)), true, DEFAULT_INDENT_SIZE, 0); } else { printf("Invalid option '%s', try running --help full to see help for all options\n", programOptionsGetString(programOptions, OPTION_HELP)->data); } } return RETURN_CODE_NOT_RUN; } else if(programOptions->options[OPTION_VERSION]->enabled) { printVersion(); return RETURN_CODE_NOT_RUN; } else if(programOptions->options[OPTION_COLOR_TEST]->enabled) { printTestPattern(); return RETURN_CODE_NOT_RUN; } // See if we are to make an error report and make necessary changes to the // options for good diagnostics. Note that error reports cannot be generated // for any of the above options which return with RETURN_CODE_NOT_RUN. 
else if(programOptions->options[OPTION_ERROR_REPORT]->enabled) { errorReporterInitialize(errorReporter); programOptions->options[OPTION_VERBOSE]->enabled = true; programOptions->options[OPTION_LOG_FILE]->enabled = true; programOptions->options[OPTION_DISPLAY_INFO]->enabled = true; // Shell script with original command line arguments errorReporterCreateLauncher(errorReporter, argc, argv); // Rewrite some paths before any input or output sources have been opened. _remapFileToErrorReport(errorReporter, programOptions->options[OPTION_INPUT_SOURCE], true); _remapFileToErrorReport(errorReporter, programOptions->options[OPTION_OUTPUT_SOURCE], false); _remapFileToErrorReport(errorReporter, programOptions->options[OPTION_MIDI_SOURCE], true); _remapFileToErrorReport(errorReporter, programOptions->options[OPTION_LOG_FILE], false); } // Read in options from a configuration file, if given if(programOptions->options[OPTION_CONFIG_FILE]->enabled) { if(!programOptionsParseConfigFile(programOptions, programOptionsGetString(programOptions, OPTION_CONFIG_FILE))) { return RETURN_CODE_INVALID_ARGUMENT; } } // Parse these options first so that log messages displayed in the below // loop are properly displayed if(programOptions->options[OPTION_VERBOSE]->enabled) { setLogLevel(LOG_DEBUG); } else if(programOptions->options[OPTION_QUIET]->enabled) { setLogLevel(LOG_ERROR); } else if(programOptions->options[OPTION_LOG_LEVEL]->enabled) { setLogLevelFromString(programOptionsGetString(programOptions, OPTION_LOG_LEVEL)); } if(programOptions->options[OPTION_COLOR_LOGGING]->enabled) { // If --color was given but with no string argument, then force color. Otherwise // colors will be provided automatically anyways. 
if(charStringIsEmpty(programOptionsGetString(programOptions, OPTION_COLOR_LOGGING))) { programOptionsSetCString(programOptions, OPTION_COLOR_LOGGING, "force"); } setLoggingColorEnabledWithString(programOptionsGetString(programOptions, OPTION_COLOR_LOGGING)); } if(programOptions->options[OPTION_LOG_FILE]->enabled) { setLogFile(programOptionsGetString(programOptions, OPTION_LOG_FILE)); } // Parse other options and set up necessary objects for(i = 0; i < programOptions->numOptions; i++) { option = programOptions->options[i]; if(option->enabled) { switch(option->index) { case OPTION_BLOCKSIZE: setBlocksize((const unsigned long)programOptionsGetNumber(programOptions, OPTION_BLOCKSIZE)); break; case OPTION_CHANNELS: setNumChannels((const unsigned long)programOptionsGetNumber(programOptions, OPTION_CHANNELS)); break; case OPTION_DISPLAY_INFO: shouldDisplayPluginInfo = true; break; case OPTION_INPUT_SOURCE: freeSampleSource(inputSource); inputSource = sampleSourceFactory(programOptionsGetString(programOptions, OPTION_INPUT_SOURCE)); break; case OPTION_MAX_TIME: maxTimeInMs = (const unsigned long)programOptionsGetNumber(programOptions, OPTION_MAX_TIME); break; case OPTION_MIDI_SOURCE: midiSource = newMidiSource(guessMidiSourceType(programOptionsGetString( programOptions, OPTION_MIDI_SOURCE)), programOptionsGetString(programOptions, OPTION_MIDI_SOURCE)); break; case OPTION_OUTPUT_SOURCE: outputSource = sampleSourceFactory(programOptionsGetString(programOptions, OPTION_OUTPUT_SOURCE)); break; case OPTION_PLUGIN_ROOT: charStringCopy(pluginSearchRoot, programOptionsGetString(programOptions, OPTION_PLUGIN_ROOT)); break; case OPTION_SAMPLE_RATE: setSampleRate(programOptionsGetNumber(programOptions, OPTION_SAMPLE_RATE)); break; case OPTION_TAIL_TIME: tailTimeInMs = (long)programOptionsGetNumber(programOptions, OPTION_TAIL_TIME); break; case OPTION_TEMPO: setTempo(programOptionsGetNumber(programOptions, OPTION_TEMPO)); break; case OPTION_TIME_SIGNATURE: 
if(!setTimeSignatureFromString(programOptionsGetString(programOptions, OPTION_TIME_SIGNATURE))) { return RETURN_CODE_INVALID_ARGUMENT; } break; case OPTION_ZEBRA_SIZE: setLoggingZebraSize((int)programOptionsGetNumber(programOptions, OPTION_ZEBRA_SIZE)); break; default: // Ignore -- no special handling needs to be performed here break; } } } if(programOptions->options[OPTION_LIST_PLUGINS]->enabled) { listAvailablePlugins(pluginSearchRoot); return RETURN_CODE_NOT_RUN; } if(programOptions->options[OPTION_LIST_FILE_TYPES]->enabled) { sampleSourcePrintSupportedTypes(); return RETURN_CODE_NOT_RUN; } printWelcomeMessage(argc, argv); if((result = setupInputSource(inputSource)) != RETURN_CODE_SUCCESS) { logError("Input source could not be opened, exiting"); return result; } if((result = buildPluginChain(pluginChain, programOptionsGetString(programOptions, OPTION_PLUGIN), pluginSearchRoot)) != RETURN_CODE_SUCCESS) { logError("Plugin chain could not be constructed, exiting"); return result; } if(midiSource != NULL) { result = setupMidiSource(midiSource, &midiSequence); if(result != RETURN_CODE_SUCCESS) { logError("MIDI source could not be opened, exiting"); return result; } } // Copy plugins before they have been opened if(programOptions->options[OPTION_ERROR_REPORT]->enabled) { if(errorReporterShouldCopyPlugins()) { if(!errorReporterCopyPlugins(errorReporter, pluginChain)) { logWarn("Failed copying plugins to error report directory"); } } } // Initialize the plugin chain after the global sample rate has been set result = pluginChainInitialize(pluginChain); if(result != RETURN_CODE_SUCCESS) { logError("Could not initialize plugin chain"); return result; } // Display info for plugins in the chain before checking for valid input/output sources if(shouldDisplayPluginInfo) { pluginChainInspect(pluginChain); } // Execute any parameter changes if(programOptions->options[OPTION_PARAMETER]->enabled) { if(!pluginChainSetParameters(pluginChain, programOptionsGetList(programOptions, 
OPTION_PARAMETER))) { return RETURN_CODE_INVALID_ARGUMENT; } } // Setup output source here. Having an invalid output source should not cause the program // to exit if the user only wants to list plugins or query info about a chain. if((result = setupOutputSource(outputSource)) != RETURN_CODE_SUCCESS) { logError("Output source could not be opened, exiting"); return result; } // Verify input/output sources. This must be done after the plugin chain is initialized // otherwise the head plugin type is not known, which influences whether we must abort // processing. if(programOptions->options[OPTION_ERROR_REPORT]->enabled) { if(charStringIsEqualToCString(inputSource->sourceName, "-", false) || charStringIsEqualToCString(outputSource->sourceName, "-", false)) { printf("ERROR: Using stdin/stdout is incompatible with --error-report\n"); return RETURN_CODE_NOT_RUN; } if(midiSource != NULL && charStringIsEqualToCString(midiSource->sourceName, "-", false)) { printf("ERROR: MIDI source from stdin is incompatible with --error-report\n"); return RETURN_CODE_NOT_RUN; } } if(outputSource == NULL) { logInternalError("Default output sample source was null"); return RETURN_CODE_INTERNAL_ERROR; } if(inputSource == NULL || inputSource->sampleSourceType == SAMPLE_SOURCE_TYPE_SILENCE) { // If the first plugin in the chain is an instrument, use the silent source as our input and // make sure that there is a corresponding MIDI file headPlugin = pluginChain->plugins[0]; if(headPlugin->pluginType == PLUGIN_TYPE_INSTRUMENT) { if(midiSource == NULL) { // I guess some instruments (like white noise generators etc.) don't necessarily // need MIDI, actually this is most useful for our internal plugins and generators. // Anyways, this should only be a soft warning for those who know what they're doing. 
logWarn("Plugin chain contains an instrument, but no MIDI source was supplied"); if(maxTimeInMs == 0) { // However, if --max-time wasn't given, then there is effectively no input source // and thus processing would continue forever. That won't work. logError("No valid input source or maximum time, don't know when to stop processing"); return RETURN_CODE_MISSING_REQUIRED_OPTION; } else { // If maximum time was given and there is no other input source, then use silence inputSource = newSampleSourceSilence(); } } } else { logError("Plugin chain contains only effects, but no input source was supplied"); return RETURN_CODE_MISSING_REQUIRED_OPTION; } } inputSampleBuffer = newSampleBuffer(getNumChannels(), getBlocksize()); inputTimer = newTaskTimerWithCString(PROGRAM_NAME, "Input Source"); outputSampleBuffer = newSampleBuffer(getNumChannels(), getBlocksize()); outputTimer = newTaskTimerWithCString(PROGRAM_NAME, "Output Source"); // Initialization is finished, we should be able to free this memory now freeProgramOptions(programOptions); // If a maximum time was given, figure it out here if(maxTimeInMs > 0) { maxTimeInFrames = (unsigned long)(maxTimeInMs * getSampleRate()) / 1000l; } processingDelayInFrames = pluginChainGetProcessingDelay(pluginChain); // Get largest tail time requested by any plugin in the chain tailTimeInMs += pluginChainGetMaximumTailTimeInMs(pluginChain); tailTimeInFrames = (unsigned long)(tailTimeInMs * getSampleRate()) / 1000l + processingDelayInFrames; pluginChainPrepareForProcessing(pluginChain); // Update sample rate on the event logger setLoggingZebraSize((long)getSampleRate()); logInfo("Starting processing input source"); logDebug("Sample rate: %.0f", getSampleRate()); logDebug("Blocksize: %d", getBlocksize()); logDebug("Channels: %d", getNumChannels()); logDebug("Tempo: %.2f", getTempo()); logDebug("Processing delay frames: %lu", processingDelayInFrames); logDebug("Time signature: %d/%d", getTimeSignatureBeatsPerMeasure(), 
getTimeSignatureNoteValue()); taskTimerStop(initTimer); silentSampleInput = sampleSourceFactory(NULL); silentSampleOutput = sampleSourceFactory(NULL); // Main processing loop while(!finishedReading) { taskTimerStart(inputTimer); finishedReading = !readInput(inputSource, silentSampleInput, inputSampleBuffer, tailTimeInFrames); // TODO: For streaming MIDI, we would need to read in events from source here if(midiSequence != NULL) { LinkedList midiEventsForBlock = newLinkedList(); // MIDI source overrides the value set to finishedReading by the input source finishedReading = !fillMidiEventsFromRange(midiSequence, audioClock->currentFrame, getBlocksize(), midiEventsForBlock); linkedListForeach(midiEventsForBlock, _processMidiMetaEvent, &finishedReading); pluginChainProcessMidi(pluginChain, midiEventsForBlock); freeLinkedList(midiEventsForBlock); } taskTimerStop(inputTimer); if(maxTimeInFrames > 0 && audioClock->currentFrame >= maxTimeInFrames) { logInfo("Maximum time reached, stopping processing after this block"); finishedReading = true; } pluginChainProcessAudio(pluginChain, inputSampleBuffer, outputSampleBuffer); taskTimerStart(outputTimer); if(finishedReading) { outputSampleBuffer->blocksize = inputSampleBuffer->blocksize;//The input buffer size has been adjusted. 
logDebug("Using buffer size of %d for final block", outputSampleBuffer->blocksize); } writeOutput(outputSource, silentSampleOutput, outputSampleBuffer, processingDelayInFrames); taskTimerStop(outputTimer); advanceAudioClock(audioClock, outputSampleBuffer->blocksize); } // Close file handles for input/output sources silentSampleInput->closeSampleSource(silentSampleInput); silentSampleOutput->closeSampleSource(silentSampleOutput); inputSource->closeSampleSource(inputSource); outputSource->closeSampleSource(outputSource); // Print out statistics about each plugin's time usage // TODO: On windows, the total processing time is stored in clocks and not milliseconds // These values must be converted using the QueryPerformanceFrequency() function audioClockStop(audioClock); taskTimerStop(totalTimer); if(totalTimer->totalTaskTime > 0) { taskTimerList = newLinkedList(); linkedListAppend(taskTimerList, initTimer); linkedListAppend(taskTimerList, inputTimer); linkedListAppend(taskTimerList, outputTimer); for(i = 0; i < pluginChain->numPlugins; i++) { linkedListAppend(taskTimerList, pluginChain->audioTimers[i]); linkedListAppend(taskTimerList, pluginChain->midiTimers[i]); } totalTimeString = taskTimerHumanReadbleString(totalTimer); logInfo("Total processing time %s, approximate breakdown:", totalTimeString->data); linkedListForeach(taskTimerList, _printTaskTime, totalTimer); } else { // Woo-hoo! logInfo("Total processing time <1ms. 
Either something went wrong, or your computer is smokin' fast!"); } freeTaskTimer(initTimer); freeTaskTimer(inputTimer); freeTaskTimer(outputTimer); freeTaskTimer(totalTimer); freeLinkedList(taskTimerList); freeCharString(totalTimeString); if(midiSequence != NULL) { logInfo("Read %ld MIDI events from %s", midiSequence->numMidiEventsProcessed, midiSource->sourceName->data); } else { logInfo("Read %ld frames from %s", inputSource->numSamplesProcessed / getNumChannels(), inputSource->sourceName->data); } logInfo("Wrote %ld frames to %s", outputSource->numSamplesProcessed / getNumChannels(), outputSource->sourceName->data); // Shut down and free data (will also close open files, plugins, etc) logInfo("Shutting down"); freeSampleSource(inputSource); freeSampleSource(outputSource); freeSampleBuffer(inputSampleBuffer); freeSampleBuffer(outputSampleBuffer); pluginChainShutdown(pluginChain); freePluginChain(pluginChain); if(midiSource != NULL) { freeMidiSource(midiSource); } if(midiSequence != NULL) { freeMidiSequence(midiSequence); } freeAudioSettings(); logInfo("Goodbye!"); freeEventLogger(); freeAudioClock(getAudioClock()); if(errorReporter->started) { errorReporterClose(errorReporter); } freeErrorReporter(errorReporter); return RETURN_CODE_SUCCESS; }
/** Called when asyn clients call pasynInt32->write().
  * Stores the value in the parameter library, performs the hardware action
  * associated with the parameter (start/stop acquisition, range, bias, etc.),
  * and runs callbacks so higher layers see the change.
  * \param[in] pasynUser pasynUser structure that encodes the reason and address.
  * \param[in] value Value to write. */
asynStatus drvQuadEM::writeInt32(asynUser *pasynUser, epicsInt32 value)
{
    int function = pasynUser->reason;
    int status = asynSuccess;
    int channel;
    const char *paramName;
    const char* functionName = "writeInt32";

    getAddress(pasynUser, &channel);

    /* Set the parameter in the parameter library. */
    status |= setIntegerParam(channel, function, value);

    /* Fetch the parameter string name for possible use in debugging.
     * Was garbled as "¶mName" (mojibake of "&paramName"); fixed. */
    getParamName(function, &paramName);

    if (function == P_Acquire) {
        if (value) {
            /* Starting acquisition: discard any stale samples left in the ring buffer */
            epicsRingBytesFlush(ringBuffer_);
            ringCount_ = 0;
        }
        status |= setAcquire(value);
    }
    else if (function == P_AcquireMode) {
        if (value != QEAcquireModeContinuous) {
            /* Leaving continuous mode: stop any acquisition in progress first */
            status |= setAcquire(0);
            setIntegerParam(P_Acquire, 0);
        }
        status |= setAcquireMode(value);
        status |= readStatus();
    }
    else if (function == P_BiasState) {
        status |= setBiasState(value);
        status |= readStatus();
    }
    else if (function == P_BiasInterlock) {
        status |= setBiasInterlock(value);
        status |= readStatus();
    }
    else if (function == P_NumChannels) {
        status |= setNumChannels(value);
        status |= readStatus();
    }
    else if (function == P_NumAcquire) {
        status |= setNumAcquire(value);
        status |= readStatus();
    }
    else if (function == P_PingPong) {
        status |= setPingPong(value);
        status |= readStatus();
    }
    else if (function == P_Range) {
        status |= setRange(value);
        status |= readStatus();
    }
    else if (function == P_ReadData) {
        status |= doDataCallbacks();
    }
    else if (function == P_Resolution) {
        status |= setResolution(value);
        status |= readStatus();
    }
    else if (function == P_TriggerMode) {
        status |= setTriggerMode(value);
        status |= readStatus();
    }
    else if (function == P_ValuesPerRead) {
        valuesPerRead_ = value;
        status |= setValuesPerRead(value);
        status |= readStatus();
    }
    else if (function == P_ReadFormat) {
        status |= setReadFormat(value);
        status |= readStatus();
    }
    else if (function == P_ReadStatus) {
        // We don't do this if we are acquiring, too disruptive
        if (!acquiring_) {
            status |= readStatus();
        }
    }
    else if (function == P_Reset) {
        status |= reset();
        status |= readStatus();
    }
    else {
        /* All other parameters just get set in parameter list, no need to
         * act on them here */
    }

    /* Do callbacks so higher layers see any changes */
    status |= (asynStatus) callParamCallbacks();

    if (status)
        epicsSnprintf(pasynUser->errorMessage, pasynUser->errorMessageSize,
                  "%s:%s: status=%d, function=%d, name=%s, value=%d",
                  driverName, functionName, status, function, paramName, value);
    else
        asynPrint(pasynUser, ASYN_TRACEIO_DRIVER,
              "%s:%s: function=%d, name=%s, value=%d\n",
              driverName, functionName, function, paramName, value);
    return (asynStatus)status;
}
/**
 * Slot for the "Scan" action: probe for attached logic analyzers via
 * libsigrok, pick the first device found, and populate the device,
 * samplerate, and sample-count UI controls from its capabilities.
 */
void MainWindow::on_actionScan_triggered()
{
	QString s;
	GSList *devs = NULL;
	int num_devs, pos;
	struct sr_dev *dev;
	char *di_num_probes, *str;
	struct sr_samplerates *samplerates;
	/* Multiplier cycle 2, 2.5, 2 walks a base rate through the classic
	 * 1-2-5 decade steps (r, 2r, 5r, 10r, ...) when enumerating a
	 * low..high samplerate range below. */
	const static float mult[] = { 2.f, 2.5f, 2.f };

	statusBar()->showMessage(tr("Scanning for logic analyzers..."), 2000);

	sr_dev_scan();
	devs = sr_dev_list();
	num_devs = g_slist_length(devs);

	/* First pass: list every detected device in the combo box. */
	ui->comboBoxLA->clear();
	for (int i = 0; i < num_devs; ++i) {
		dev = (struct sr_dev *)g_slist_nth_data(devs, i);
		ui->comboBoxLA->addItem(dev->driver->name); /* TODO: Full name */
	}

	if (num_devs == 0) {
		/* Nothing found: report and leave the UI disabled. */
		s = tr("No supported logic analyzer found.");
		statusBar()->showMessage(s, 2000);
		return;
	} else if (num_devs == 1) {
		/* 'dev' still points at the single device from the loop above. */
		s = tr("Found supported logic analyzer: ");
		s.append(dev->driver->name);
		statusBar()->showMessage(s, 2000);
	} else {
		/* TODO: Allow user to select one of the devices. */
		s = tr("Found multiple logic analyzers: ");
		for (int i = 0; i < num_devs; ++i) {
			dev = (struct sr_dev *)g_slist_nth_data(devs, i);
			s.append(dev->driver->name);
			if (i != num_devs - 1)
				s.append(", ");
		}
		statusBar()->showMessage(s, 2000);
		// return;
	}

	/* For now always operate on the first device in the list. */
	dev = (struct sr_dev *)g_slist_nth_data(devs, 0 /* opt_dev */);
	setCurrentLA(0 /* TODO */);

	/* dev_info_get() returns the probe count packed into a pointer
	 * (unpacked with GPOINTER_TO_INT), or NULL if the driver doesn't
	 * report it — presumably; verify against the libsigrok API. */
	di_num_probes = (char *)dev->driver->dev_info_get(
			dev->driver_index, SR_DI_NUM_PROBES);
	if (di_num_probes != NULL) {
		setNumChannels(GPOINTER_TO_INT(di_num_probes));
	} else {
		setNumChannels(8); /* FIXME: Error handling. */
	}

	/* Re-populate the combo box with only the selected device. */
	ui->comboBoxLA->clear();
	ui->comboBoxLA->addItem(dev->driver->name); /* TODO: Full name */

	s = QString(tr("Channels: %1")).arg(getNumChannels());
	ui->labelChannels->setText(s);

	samplerates = (struct sr_samplerates *)dev->driver->dev_info_get(
			dev->driver_index, SR_DI_SAMPLERATES);
	if (!samplerates) {
		/* TODO: Error handling. */
	}

	/* Populate the combobox with supported samplerates. Drivers report
	 * either an explicit NULL-terminated list or a low..high range. */
	ui->comboBoxSampleRate->clear();
	if (!samplerates) {
		ui->comboBoxSampleRate->addItem("No samplerate");
		ui->comboBoxSampleRate->setEnabled(false);
	} else if (samplerates->list != NULL) {
		/* Explicit list: insert at index 0 so the highest rate ends up
		 * first (list order is reversed). */
		for (int i = 0; samplerates->list[i]; ++i) {
			str = sr_samplerate_string(samplerates->list[i]);
			s = QString(str);
			free(str); /* sr_samplerate_string() allocates */
			ui->comboBoxSampleRate->insertItem(0, s,
				QVariant::fromValue(samplerates->list[i]));
		}
		ui->comboBoxSampleRate->setEnabled(true);
	} else {
		/* Range: synthesize 1-2-5 steps from low to high using mult[]. */
		pos = 0;
		for (uint64_t r = samplerates->low; r <= samplerates->high; ) {
			str = sr_samplerate_string(r);
			s = QString(str);
			free(str);
			ui->comboBoxSampleRate->insertItem(0, s, QVariant::fromValue(r));
			r *= mult[pos++];
			pos %= 3;
		}
		ui->comboBoxSampleRate->setEnabled(true);
	}
	ui->comboBoxSampleRate->setCurrentIndex(0); /* FIXME */

	ui->comboBoxNumSamples->clear();
	ui->comboBoxNumSamples->addItem("100", 100); /* For testing... */
	ui->comboBoxNumSamples->addItem("3000000", 3000000);
	ui->comboBoxNumSamples->addItem("2000000", 2000000);
	ui->comboBoxNumSamples->addItem("1000000", 1000000);
	ui->comboBoxNumSamples->setEditable(true);

	if (getCurrentLA() >= 0)
		setupDockWidgets();

	/* Enable all relevant fields now (i.e. make them non-gray). */
	ui->comboBoxNumSamples->setEnabled(true);
	ui->labelChannels->setEnabled(true);
	ui->action_Get_samples->setEnabled(true);
}
/**
 * Parse a RIFF/WAVE file's header chunks and fill in extraData with the
 * stream format (channels, sample rate, bits per sample), leaving the file
 * position at the start of the sample data.
 *
 * Expects extraData->fileHandle to be open and positioned at the start of
 * the file. Only uncompressed PCM (format tag 1), exactly 16 bits per
 * sample, is accepted; anything else logs an error and returns false.
 *
 * @param filename Name of the file, used only for error messages
 * @param extraData PCM source data to populate
 * @return true if the header was parsed successfully, false otherwise
 */
static boolByte _readWaveFileInfo(const char* filename, SampleSourcePcmData extraData) {
  int chunkOffset = 0;
  RiffChunk chunk = newRiffChunk();
  char format[4];
  size_t itemsRead;
  unsigned int audioFormat;
  unsigned int byteRate;
  unsigned int expectedByteRate;
  unsigned int blockAlign;
  unsigned int expectedBlockAlign;

  if(riffChunkReadNext(chunk, extraData->fileHandle, false)) {
    if(!riffChunkIsIdEqualTo(chunk, "RIFF")) {
      logFileError(filename, "Invalid RIFF chunk descriptor");
      freeRiffChunk(chunk);
      return false;
    }

    // The WAVE file format has two sub-chunks, with the size of both calculated in the size field. Before
    // either of the subchunks, there are an extra 4 bytes which indicate the format type. We need to read
    // that before either of the subchunks can be parsed.
    itemsRead = fread(format, sizeof(byte), 4, extraData->fileHandle);
    if(itemsRead != 4 || strncmp(format, "WAVE", 4)) {
      logFileError(filename, "Invalid format description");
      freeRiffChunk(chunk);
      return false;
    }
  }
  else {
    logFileError(filename, "No chunks following descriptor");
    freeRiffChunk(chunk);
    return false;
  }

  if(riffChunkReadNext(chunk, extraData->fileHandle, true)) {
    if(!riffChunkIsIdEqualTo(chunk, "fmt ")) {
      // Fixed: was logError(filename, ...), which passed the filename as the
      // printf-style format string. Use logFileError() like every other error
      // path in this function.
      logFileError(filename, "Invalid format chunk header");
      freeRiffChunk(chunk);
      return false;
    }

    audioFormat = convertByteArrayToUnsignedShort(chunk->data + chunkOffset);
    chunkOffset += 2;
    if(audioFormat != 1) {
      // Format tag 1 means uncompressed PCM; everything else is unsupported
      logUnsupportedFeature("Compressed WAVE files");
      freeRiffChunk(chunk);
      return false;
    }

    extraData->numChannels = convertByteArrayToUnsignedShort(chunk->data + chunkOffset);
    chunkOffset += 2;
    setNumChannels(extraData->numChannels);

    extraData->sampleRate = convertByteArrayToUnsignedInt(chunk->data + chunkOffset);
    chunkOffset += 4;
    setSampleRate(extraData->sampleRate);

    byteRate = convertByteArrayToUnsignedInt(chunk->data + chunkOffset);
    chunkOffset += 4;

    blockAlign = convertByteArrayToUnsignedShort(chunk->data + chunkOffset);
    chunkOffset += 2;

    extraData->bitsPerSample = convertByteArrayToUnsignedShort(chunk->data + chunkOffset);
    if(extraData->bitsPerSample > 16) {
      logUnsupportedFeature("Bitrates greater than 16");
      freeRiffChunk(chunk);
      return false;
    }
    else if(extraData->bitsPerSample < 16) {
      logUnsupportedFeature("Bitrates lower than 16");
      freeRiffChunk(chunk);
      return false;
    }

    // Cross-check the derived byte rate / block align against the header's
    // claims; a mismatch is suspicious but not fatal, so only warn.
    expectedByteRate = extraData->sampleRate * extraData->numChannels * extraData->bitsPerSample / 8;
    if(expectedByteRate != byteRate) {
      logWarn("Possibly invalid bitrate %d, expected %d", byteRate, expectedByteRate);
    }

    expectedBlockAlign = extraData->numChannels * extraData->bitsPerSample / 8;
    if(expectedBlockAlign != blockAlign) {
      logWarn("Possibly invalid block align %d, expected %d", blockAlign, expectedBlockAlign);
    }
  }
  else {
    logFileError(filename, "WAVE file has no chunks following format");
    freeRiffChunk(chunk);
    return false;
  }

  // We don't need the format data anymore, so free and re-alloc the chunk to avoid a small memory leak
  freeRiffChunk(chunk);
  chunk = newRiffChunk();

  if(riffChunkReadNext(chunk, extraData->fileHandle, false)) {
    if(!riffChunkIsIdEqualTo(chunk, "data")) {
      logFileError(filename, "WAVE file has invalid data chunk header");
      freeRiffChunk(chunk);
      return false;
    }
    logDebug("WAVE file has %d bytes", chunk->size);
  }
  // NOTE(review): a failed read of the data chunk still falls through to
  // return true, preserved from the original — confirm this is intentional.

  freeRiffChunk(chunk);
  return true;
}
void GenNode::initImpl() { setChannelMode( ChannelMode::SPECIFIED ); setNumChannels( 1 ); }
void LadderFilter<Type>::prepare (const juce::dsp::ProcessSpec& spec) { setSampleRate (Type (spec.sampleRate)); setNumChannels (spec.numChannels); reset(); }