/** Construct a reader that presents a sub-region of an existing reader.
    The region behaviour is implemented by routing the Plank C reader's
    next/free/seek/tell callbacks to the AudioFileReaderInternal_Region_*
    functions, with a heap-allocated AudioFileReaderRegion as their context.
    @param original   The reader whose audio the region refers to.
    @param region     The region (start/end) to expose.
    @param bufferSize Read buffer size in bytes; <= 0 selects AudioFile::DefaultBufferSize. */
AudioFileReaderInternal::AudioFileReaderInternal (AudioFileReader const& original, AudioFileRegion const& region, const int bufferSize) throw()
:   readBuffer (Chars::withSize ((bufferSize > 0) ? bufferSize : AudioFile::DefaultBufferSize)),
    numFramesPerBuffer (0),
    newPositionOnNextRead (-1),   // -1 means "no pending seek"
    hitEndOfFile (false),
    numChannelsChanged (false),
    audioFileChanged (false),
    defaultNumChannels (0)
{
    pl_AudioFileReader_Init (getPeerRef());

    // Context object for the custom callbacks below. Ownership is intended to
    // pass to the C layer, which releases it via the *_FreeFunction callback.
    // NOTE(review): if OpenWithCustomNextFunction fails, readerRegion appears
    // to leak unless the C layer still invokes the free function — TODO confirm.
    AudioFileReaderRegion* readerRegion = new AudioFileReaderRegion (original, region);
//    setName (original.getName() + "#" + region.getLabel());

    if (pl_AudioFileReader_OpenWithCustomNextFunction (getPeerRef(),
                                                       AudioFileReaderInternal_Region_NextFunction,
                                                       AudioFileReaderInternal_Region_FreeFunction,
                                                       AudioFileReaderInternal_Region_SetFramePosition,
                                                       AudioFileReaderInternal_Region_GetFramePosition,
                                                       readerRegion) == PlankResult_OK)
    {
        // Derive frames-per-buffer only when the frame size is known,
        // avoiding a division by zero for unrecognised formats.
        if (getBytesPerFrame() > 0)
            numFramesPerBuffer = readBuffer.length() / getBytesPerFrame();
    }
}
/** Construct a reader over an array of audio files (e.g. sequential or
    random multi-file playback, selected by multiMode).
    Each C++ AudioFileReader disowns its Plank peer into a raw
    PlankDynamicArray, which is then handed to the C layer (the 'true'
    argument passes ownership of the array to the peer).
    @param audioFiles The readers to aggregate; their peers are disowned here.
    @param multiMode  How the files are traversed (see AudioFile::MultiFileTypes).
    @param bufferSize Read buffer size in bytes; <= 0 selects AudioFile::DefaultBufferSize.
    @param indexRef   Optional shared index variable used to drive/observe which
                      file is selected next; copied into nextMultiIndexRef. */
AudioFileReaderInternal::AudioFileReaderInternal (AudioFileReaderArray const& audioFiles, const AudioFile::MultiFileTypes multiMode, const int bufferSize, IntVariable* indexRef) throw()
:   readBuffer (Chars::withSize ((bufferSize > 0) ? bufferSize : AudioFile::DefaultBufferSize)),
    numFramesPerBuffer (0),
    newPositionOnNextRead (-1),   // -1 means "no pending seek"
    hitEndOfFile (false),
    numChannelsChanged (false),
    audioFileChanged (false),
    defaultNumChannels (0)
{
    pl_AudioFileReader_Init (getPeerRef());

    // Build a raw Plank array of reader structs sized to the input array.
    PlankDynamicArrayRef array = pl_DynamicArray_Create();
    pl_DynamicArray_InitWithItemSizeAndSize (array, sizeof (PlankAudioFileReader), audioFiles.length(), true);
    PlankAudioFileReader* rawArray = static_cast<PlankAudioFileReader*> (pl_DynamicArray_GetArray (array));

    // Transfer each reader's peer into the raw array; after this the C++
    // wrappers no longer own their peers.
    for (int i = 0; i < audioFiles.length(); ++i)
    {
        AudioFileReader reader = audioFiles.atUnchecked (i);
        reader.disownPeer (&rawArray[i]);
    }

    if (indexRef)
        nextMultiIndexRef = *indexRef;

    // 'true' => the peer takes ownership of 'array'.
    // NOTE(review): if Open fails, 'array' (and the disowned peers in it)
    // appear to leak unless the C layer cleans up on failure — TODO confirm.
    ResultCode result = pl_AudioFileReader_OpenWithAudioFileArray (getPeerRef(), array, true, multiMode, nextMultiIndexRef.getValuePtr());

    if (result == PlankResult_OK)
    {
        // Avoid division by zero for unrecognised formats.
        if (getBytesPerFrame() > 0)
            numFramesPerBuffer = readBuffer.length() / getBytesPerFrame();
    }
}
/** Open an audio file by path on the Plank peer.
    @param path            Path to the audio file; must not be null.
    @param metaDataIOFlags Which metadata to read while opening.
    @return PlankResult_OK on success; PlankResult_AudioFileInavlidType
            (library's spelling) when the frame size is zero, i.e. the
            format was not recognised; otherwise the open error code. */
ResultCode AudioFileReaderInternal::init (const char* path, AudioFileMetaDataIOFlags const& metaDataIOFlags) throw()
{
    plonk_assert (path != 0);

    pl_AudioFileReader_Init (getPeerRef());
    ResultCode result = pl_AudioFileReader_OpenInternal (getPeerRef(), path, metaDataIOFlags.getValue());

    const int bytesPerFrame = getBytesPerFrame();

    if (bytesPerFrame == 0)
        result = PlankResult_AudioFileInavlidType; // unknown/invalid format: no frame size
    else if (result == PlankResult_OK)
        numFramesPerBuffer = readBuffer.length() / bytesPerFrame; // reuse the cached value rather than calling getBytesPerFrame() again

    // Cue points are sorted so later position-based lookups can assume order.
    if (this->hasMetaData())
        this->getMetaData().sortCuePointsByPosition();

    return result;
}
/** Construct a reader that consumes readers from a queue.
    Like the region constructor, this routes the Plank reader's callbacks to
    the AudioFileReaderInternal_Queue_* functions, with a heap-allocated copy
    of the queue as their context (released via the free callback).
    @param audioFiles Queue of readers to play through.
    @param bufferSize Read buffer size in bytes; <= 0 selects AudioFile::DefaultBufferSize. */
AudioFileReaderInternal::AudioFileReaderInternal (AudioFileReaderQueue const& audioFiles, const int bufferSize) throw()
:   readBuffer (Chars::withSize ((bufferSize > 0) ? bufferSize : AudioFile::DefaultBufferSize)),
    numFramesPerBuffer (0),
    newPositionOnNextRead (-1),   // -1 means "no pending seek"
    hitEndOfFile (false),
    numChannelsChanged (false),
    audioFileChanged (false),
    defaultNumChannels (0)
{
    pl_AudioFileReader_Init (getPeerRef());

    // The 'new AudioFileReaderQueue' below is the callback context; ownership
    // is intended to pass to the C layer via the *_FreeFunction callback.
    // NOTE(review): as with the region constructor, a failed open may leak
    // this context — TODO confirm the C layer's failure-path behaviour.
    if (pl_AudioFileReader_OpenWithCustomNextFunction (getPeerRef(),
                                                       AudioFileReaderInternal_Queue_NextFunction,
                                                       AudioFileReaderInternal_Queue_FreeFunction,
                                                       AudioFileReaderInternal_Queue_SetFramePosition, //0,
                                                       AudioFileReaderInternal_Queue_GetFramePosition, //0,
                                                       new AudioFileReaderQueue (audioFiles)) == PlankResult_OK)
    {
        // Avoid division by zero for unrecognised formats.
        if (getBytesPerFrame() > 0)
            numFramesPerBuffer = readBuffer.length() / getBytesPerFrame();
    }
}
/** Construct a reader that streams through a queue of file paths.
    The paths are wrapped in a single multi-file PlankFile which the peer
    then opens as one logical audio stream (no metadata is read).
    @param fileQueue  Queue of file paths to read in sequence.
    @param bufferSize Read buffer size in bytes; <= 0 selects AudioFile::DefaultBufferSize. */
AudioFileReaderInternal::AudioFileReaderInternal (FilePathQueue const& fileQueue, const int bufferSize) throw()
:   readBuffer (Chars::withSize ((bufferSize > 0) ? bufferSize : AudioFile::DefaultBufferSize)),
    numFramesPerBuffer (0),
    newPositionOnNextRead (-1),   // -1 means "no pending seek"
    hitEndOfFile (false),
    numChannelsChanged (false),
    audioFileChanged (false),
    defaultNumChannels (0)
{
    pl_AudioFileReader_Init (getPeerRef());

    PlankFile multiFile;

    // Guard clauses: leave the reader in its default (unopened) state on failure.
    if (! BinaryFileInternal::setupMulti (&multiFile, fileQueue, PLANK_BIGENDIAN))
        return;

    if (pl_AudioFileReader_OpenWithFile (getPeerRef(), &multiFile, AudioFile::MetaDataIOFlagsNone) != PlankResult_OK)
        return;

    // Avoid division by zero for unrecognised formats.
    const int frameBytes = getBytesPerFrame();

    if (frameBytes > 0)
        numFramesPerBuffer = readBuffer.length() / frameBytes;
}
/** Set up a Speex preprocessor for denoising.
    On success mBytesPerFrame holds the frame size in bytes and mpSpStat the
    Speex state; on failure mBytesPerFrame is zeroed to mark the object unusable.
    @param sampleRate    Sample rate in Hz.
    @param bytesPerSample Bytes per sample of the audio format.
    @param channelNumber Number of interleaved channels.
    @param denoiseLevel  Noise suppression level passed to Speex (in dB, typically negative). */
SoundPreprocessor::SoundPreprocessor(int sampleRate, int bytesPerSample, int channelNumber, int denoiseLevel){
    spx_int32_t enableDenoise = 1; // non-zero enables SPEEX_PREPROCESS_SET_DENOISE
    spx_int32_t suppressLevel = static_cast<spx_int32_t>(denoiseLevel);

    mBytesPerFrame = getBytesPerFrame(sampleRate, bytesPerSample, channelNumber);

    // Speex works on 16-bit samples, so the frame length is in shorts.
    mpSpStat = speex_preprocess_state_init(mBytesPerFrame / sizeof(short), sampleRate);

    if(!mpSpStat){
        mBytesPerFrame = 0; // mark initialisation failure
        return;
    }

    speex_preprocess_ctl(mpSpStat, SPEEX_PREPROCESS_SET_DENOISE, &enableDenoise);
    speex_preprocess_ctl(mpSpStat, SPEEX_PREPROCESS_SET_NOISE_SUPPRESS, &suppressLevel);
}
/** Open an audio file from an in-memory byte array.
    @param bytes           The raw audio file bytes.
    @param metaDataIOFlags Which metadata to read while opening.
    @return PlankResult_OK on success; PlankResult_AudioFileInavlidType
            (library's spelling) when the format was not recognised;
            PlankResult_UnknownError if the byte wrapper or open failed. */
ResultCode AudioFileReaderInternal::init (ByteArray const& bytes, AudioFileMetaDataIOFlags const& metaDataIOFlags) throw()
{
    pl_AudioFileReader_Init (getPeerRef());

    PlankFile file;

    if (BinaryFileInternal::setupBytes (&file, bytes, false))
    {
        if (pl_AudioFileReader_OpenWithFile (getPeerRef(), &file, metaDataIOFlags.getValue()) == PlankResult_OK)
        {
            const int bytesPerFrame = getBytesPerFrame();

            // Fix: guard against division by zero when the frame size is
            // unknown (unrecognised format), consistent with the path-based
            // init() which reports the same condition.
            if (bytesPerFrame == 0)
                return PlankResult_AudioFileInavlidType;

            numFramesPerBuffer = readBuffer.length() / bytesPerFrame;

            // Cue points are sorted so later position-based lookups can assume order.
            if (this->hasMetaData())
                this->getMetaData().sortCuePointsByPosition();

            return PlankResult_OK;
        }
    }

    return PlankResult_UnknownError;
}
/** Decode raw WAV sample bytes into per-channel float buffers, with naive
    (nearest-frame) sample-rate conversion and simple channel mapping.
    @param ubuf             Raw interleaved sample bytes from the file.
    @param sourceBytes      Number of valid bytes in ubuf.
    @param targetSampleRate Output sample rate (frames are repeated/skipped to match).
    @param targetChannels   Number of output channel buffers in 'target'.
    @param nframes          Number of output frames to produce per channel.
    @param target           Output buffers, one float* per target channel.
    @param adding           If true, mix (+=) into existing contents; if false,
                            buffers are zeroed first and then accumulated into.
    @return false for unsupported bit depths, true otherwise. */
bool
WAVAudioFile::decode(const unsigned char *ubuf,
                     size_t sourceBytes,
                     size_t targetSampleRate,
                     size_t targetChannels,
                     size_t nframes,
                     std::vector<float *> &target,
                     bool adding)
{
    size_t sourceChannels = getChannels();
    size_t sourceSampleRate = getSampleRate();
    size_t fileFrames = sourceBytes / getBytesPerFrame();
    int bitsPerSample = getBitsPerSample();

    if (bitsPerSample != 8 && bitsPerSample != 16 &&
        bitsPerSample != 24 && bitsPerSample != 32) { // 32-bit is IEEE-float (enforced in RIFFAudioFile)
        RG_WARNING << "WAVAudioFile::decode: unsupported " << bitsPerSample << "-bit sample size";
        return false;
    }

#ifdef DEBUG_DECODE
    RG_DEBUG << "WAVAudioFile::decode: " << sourceBytes << " bytes -> " << nframes << " frames, SSR " << getSampleRate() << ", TSR " << targetSampleRate << ", sch " << getChannels() << ", tch " << targetChannels;
#endif

    // If we're reading a stereo file onto a mono target, we mix the
    // two channels.  If we're reading mono to stereo, we duplicate
    // the mono channel.  Otherwise if the numbers of channels differ,
    // we just copy across the ones that do match and zero the rest.

    bool reduceToMono = (targetChannels == 1 && sourceChannels == 2);

    for (size_t ch = 0; ch < sourceChannels; ++ch) {

        // Zero the destination once per target channel (skipped for the
        // second source channel when mixing down, so its samples accumulate
        // into target[0] rather than wiping the first channel's data).
        if (!reduceToMono || ch == 0) {
            if (ch >= targetChannels)
                break;
            if (!adding)
                memset(target[ch], 0, nframes * sizeof(float));
        }

        int tch = ch; // target channel for this data
        if (reduceToMono && ch == 1) {
            tch = 0; // second source channel mixes into the mono target
        }

        // Nearest-frame resampling ratio (source frames per target frame).
        float ratio = 1.0;
        if (sourceSampleRate != targetSampleRate) {
            ratio = float(sourceSampleRate) / float(targetSampleRate);
        }

        for (size_t i = 0; i < nframes; ++i) {

            size_t j = i;
            if (sourceSampleRate != targetSampleRate) {
                j = size_t(i * ratio);
            }
            // Clamp to the last available frame rather than reading past
            // the end of the source data.
            if (j >= fileFrames)
                j = fileFrames - 1;

            // Index into the interleaved source: frame j, channel ch.
            float sample = convertBytesToSample
                (&ubuf[(bitsPerSample / 8) * (ch + j * sourceChannels)]);

            // += is safe even when !adding: the buffer was zeroed above, and
            // when mixing down both source channels sum into target[0].
            target[tch][i] += sample;
        }
    }

    // Now deal with any excess target channels

    for (size_t ch = sourceChannels; ch < targetChannels; ++ch) {
        if (ch == 1 && targetChannels == 2) {
            // copy mono to stereo
            if (!adding) {
                memcpy(target[ch], target[ch - 1], nframes * sizeof(float));
            } else {
                for (size_t i = 0; i < nframes; ++i) {
                    target[ch][i] += target[ch - 1][i];
                }
            }
        } else {
            // No source data for this channel: silence it (unless mixing,
            // in which case the existing contents are left untouched).
            if (!adding) {
                memset(target[ch], 0, nframes * sizeof(float));
            }
        }
    }

    return true;
}
int AudioFileReaderInternal::getBytesPerSample() const throw() { return getBytesPerFrame() / getNumChannels(); }