/* Calculate the power spectrum (magnitude + phase) of one windowed frame.
 *
 * data      : n input samples (time domain)
 * window    : n window coefficients, multiplied element-wise into the input
 * magnitude : out, half (= n/2) magnitudes, each offset by +1.0 (the matching
 *             inversePowerSpectrum_vdsp subtracts the 1.0 back out)
 * phase     : out, half phase values (radians)
 * start     : unused in this implementation
 *
 * Uses member state: in_real/out_real/polar scratch buffers, split-complex A,
 * FFT setup setupReal, and sizes n / half / log2n. */
void fft::powerSpectrum_vdsp(int start, float *data, float *window, float *magnitude, float *phase) {
    uint32_t i;
    //multiply by window
    vDSP_vmul(data, 1, window, 1, in_real, 1, n);
    //convert to split complex format - evens and odds
    vDSP_ctoz((COMPLEX *) in_real, 2, &A, 1, half);
    //calc fft (in-place real->complex; output is packed split-complex)
    vDSP_fft_zrip(setupReal, &A, 1, log2n, FFT_FORWARD);
    //scale by 0.5: vDSP_fft_zrip returns results scaled by 2 (see vDSP docs)
    static float scale = 0.5;
    vDSP_vsmul(A.realp, 1, &scale, A.realp, 1, half);
    vDSP_vsmul(A.imagp, 1, &scale, A.imagp, 1, half);
    //back to interleaved complex format
    vDSP_ztoc(&A, 1, (COMPLEX*) out_real, 2, half);
    //convert interleaved (re,im) pairs to interleaved (mag,phase) pairs
    vDSP_polar(out_real, 2, polar, 2, half);
    for (i = 0; i < half; i++) {
        // +1.0 offset keeps magnitudes >= 1; undone by inversePowerSpectrum_vdsp
        magnitude[i] = polar[2*i] + 1.0;
        phase[i] = polar[2*i + 1];
    }
}
/* Runs the forward FFT on the member `input` buffer and fills the member
 * `amplitude` (squared magnitudes) and `power` (dB) buffers.
 * Relies on member state: mDspSplitComplex, mSpectrumAnalysis, log2n,
 * mFFTNormFactor, mFFTLength, input, amplitude, power. */
void ofxFFT::calc() {
    //Generate a split complex vector from the real data
    vDSP_ctoz((COMPLEX *)input, 2, &mDspSplitComplex, 1, mFFTLength);
    //Take the fft and scale appropriately (vDSP_fft_zrip output is scaled by 2)
    vDSP_fft_zrip(mSpectrumAnalysis, &mDspSplitComplex, 1, log2n, FFT_FORWARD);
//	vDSP_fft_zrip(mSpectrumAnalysis, &mDspSplitComplex, 1, log2n, FFT_INVERSE);
    vDSP_vsmul(mDspSplitComplex.realp, 1, &mFFTNormFactor, mDspSplitComplex.realp, 1, mFFTLength);
    vDSP_vsmul(mDspSplitComplex.imagp, 1, &mFFTNormFactor, mDspSplitComplex.imagp, 1, mFFTLength);
//	/* The output signal is now in a split real form. Use the function
//	 * vDSP_ztoc to get a split real vector. */
//	vDSP_ztoc(&mDspSplitComplex, 1, (COMPLEX *) output, 2, mFFTLength);
//
    //Zero out the nyquist value (packed into imagp[0] by the real FFT)
    mDspSplitComplex.imagp[0] = 0.0;
    //Convert the fft data to squared magnitudes
    vDSP_zvmags(&mDspSplitComplex, 1, amplitude, 1, mFFTLength);
    //In order to avoid taking log10 of zero, an adjusting factor is added in to make the minimum value equal -128dB
    float mAdjust0DB = ADJUST_0_DB;
    vDSP_vsadd(amplitude, 1, &mAdjust0DB, power, 1, mFFTLength);
    // Convert power values to dB (flag 0 => power, i.e. 10*log10)
    float one = 1;
    vDSP_vdbcon(power, 1, &one, power, 1, mFFTLength, 0);
}
void FFTAccelerate::doFFTReal(float samples[], float amp[], int numSamples) { int i; vDSP_Length log2n = log2f(numSamples); int nOver2 = numSamples/2; //-- window //vDSP_blkman_window(window, windowSize, 0); vDSP_vmul(samples, 1, window, 1, in_real, 1, numSamples); //Convert float array of reals samples to COMPLEX_SPLIT array A vDSP_ctoz((COMPLEX*)in_real,2,&A,1,nOver2); //Perform FFT using fftSetup and A //Results are returned in A vDSP_fft_zrip(fftSetup, &A, 1, log2n, FFT_FORWARD); // scale by 1/2*n because vDSP_fft_zrip doesn't use the right scaling factors natively ("for better performances") { const float scale = 1.0f/(2.0f*(float)numSamples); vDSP_vsmul( A.realp, 1, &scale, A.realp, 1, numSamples/2 ); vDSP_vsmul( A.imagp, 1, &scale, A.imagp, 1, numSamples/2 ); } //Convert COMPLEX_SPLIT A result to float array to be returned /*amp[0] = A.realp[0]/(numSamples*2); for(i=1;i<numSamples/2;i++) amp[i]=sqrt(A.realp[i]*A.realp[i]+A.imagp[i]*A.imagp[i]);*/ // collapse split complex array into a real array. // split[0] contains the DC, and the values we're interested in are split[1] to split[len/2] (since the rest are complex conjugates) vDSP_zvabs( &A, 1, amp, 1, numSamples/2 ); }
/* Applies the analysis window and scale factor to `data` in place, then
 * zero-pads the remainder of the buffer up to `fullsize`.
 *
 * data         : buffer of at least fullsize floats; first winsize are samples
 * winsize      : number of valid samples / window length
 * fullsize     : total FFT length (tail is zeroed for zero-padded transforms)
 * log2_winsize : index into the fftWindow cache for this window length
 * wintype      : window selector; WINDOW_RECT means "no windowing"
 * scalefac     : library-dependent normalization factor applied to all samples */
void scfft_dowindowing(float *data, unsigned int winsize, unsigned int fullsize, unsigned short log2_winsize, short wintype, float scalefac) {
    int i;
    if (wintype != WINDOW_RECT) {
        float *win = fftWindow[wintype][log2_winsize];
        if (!win) return;  // window table not initialized for this size
#if SC_DARWIN
        vDSP_vmul(data, 1, win, 1, data, 1, winsize);
#else
        // Pre-decrement idiom: point one before the arrays so *++p walks them.
        --win;
        float *in = data - 1;
        for (i = 0; i < winsize; ++i) {
            *++in *= *++win;
        }
#endif
    }
    // scale factor is different for different libs. But the compiler switch here is about using vDSP's fast multiplication method.
#if SC_DARWIN
    vDSP_vsmul(data, 1, &scalefac, data, 1, winsize);
#else
    for (int i = 0; i < winsize; ++i) {
        data[i] *= scalefac;
    }
#endif
    // Zero-padding:
    memset(data + winsize, 0, (fullsize - winsize) * sizeof(float));
}
/* For each triangle filter band, writes two Csound function tables:
 *   - a time-warp (read-path) table: a linear ramp from the band's best-match
 *     start time to its end time, spread over analysisSegmentFramesCount points;
 *   - a band-gain table: the band's gain row scaled by its palette magnitude
 *     difference.
 *
 * bestTriangleBandMatches     : per-band start frame index into the palette
 * paletteSegmentFramesCounts  : per-band segment length in frames
 * paletteMagnitudeDifferences : per-band scalar applied to the gain row
 * triangleBandGains           : matrix with one gain row per band
 *
 * NOTE(review): assumes a hop of 256 samples at 44.1 kHz — confirm against the
 * Csound analysis settings. */
void CsoundObject_writeOpenCLPVSReadPath(CsoundObject *self, size_t triangleFilterBandsCount, size_t *bestTriangleBandMatches, Float32 *paletteSegmentFramesCounts, Float32 *paletteMagnitudeDifferences, Matrix32 *triangleBandGains)
{
    // One analysis frame = 256 samples at 44.1 kHz.
    Float32 frameLengthInSeconds = 1./44100. * 256.;

    for (size_t i = 0; i < triangleFilterBandsCount; ++i) {
        Float32 paletteSegmentLengthInSeconds = frameLengthInSeconds * paletteSegmentFramesCounts[i];
        Float32 startFrameInSeconds = (Float32)bestTriangleBandMatches[i] * frameLengthInSeconds;
        Float32 endFrameInSeconds = startFrameInSeconds + paletteSegmentLengthInSeconds;
        Float32 *warpTablePointer, *bandGainTablePointer;

        // Fetch the destination Csound f-tables for this band.
        csoundGetTable(self->csound, &warpTablePointer, (SInt32)(warpPathTableBaseNumber + i));
        csoundGetTable(self->csound, &bandGainTablePointer, (SInt32)(bandGainTableBaseNumber + i));

        // Linear ramp start..end across the analysis segment = the read path.
        vDSP_vgen(&startFrameInSeconds, &endFrameInSeconds, warpTablePointer, 1, self->analysisSegmentFramesCount);
        // Band gains scaled by this band's palette magnitude difference.
        vDSP_vsmul(Matrix_getRow(triangleBandGains, i), 1, &paletteMagnitudeDifferences[i], bandGainTablePointer, 1, triangleBandGains->columnCount);
        // vDSP_vsadd(self->frameTimes, 1, &startFrameInSeconds, tablePointer, 1, self->analysisSegmentFramesCount);
    }
}
/**
 * Multiplies every element of `output_` by `value_`, in place.
 *
 * @param value_  scalar multiplier
 * @param output_ buffer of size_ floats, modified in place
 * @param size_   element count (0 is a no-op)
 */
void dm_vsmul(float value_, float* output_, unsigned long size_) {
#ifdef DSP_USE_ACCELERATE
    vDSP_vsmul(output_, 1, &value_, output_, 1, size_);
#else
    // generic fallback. BUG FIX: the original used
    // std::bind2nd(std::multiplies<float>(), value_), which was deprecated in
    // C++11 and removed in C++17 — a lambda is the drop-in replacement.
    std::transform(output_, output_ + size_, output_,
                   [value_](float sample) { return sample * value_; });
#endif
}
/*******************************************************************************
 Int16BufferToFloat
 Converts `length` signed 16-bit samples from `src` into normalized floats in
 `dest` (range approximately [-1, 1], scaled by 1/INT16_MAX).
 Returns NOERR on completion. `src` and `dest` must not overlap. */
Error_t
Int16BufferToFloat(float* dest, const signed short* src, unsigned length)
{
#ifdef __APPLE__
    // Use the Accelerate framework if we have it. Convert directly into dest
    // and scale in place (vDSP_vsmul supports in-place operation).
    // BUG FIX: the original staged the conversion through `float temp[length]`,
    // a variable-length array — non-standard in C++ and a stack-overflow risk
    // for large buffers. The temporary is unnecessary.
    const float scale = 1.0f / (float)INT16_MAX;
    vDSP_vflt16(src, 1, dest, 1, length);
    vDSP_vsmul(dest, 1, &scale, dest, 1, length);
#else
    // Otherwise do it manually: 4 samples per iteration, scalar tail loop.
    unsigned i;
    const unsigned end = 4 * (length / 4);
    for (i = 0; i < end; i += 4)
    {
        dest[i] = int16ToFloat(*src++);
        dest[i + 1] = int16ToFloat(*src++);
        dest[i + 2] = int16ToFloat(*src++);
        dest[i + 3] = int16ToFloat(*src++);
    }
    for (i = end; i < length; ++i)
    {
        dest[i] = int16ToFloat(*src++);
    }
#endif
    return NOERR;
}
/******************************************************************************* VectorScalarMultiply */ Error_t VectorScalarMultiply(float *dest, const float *in1, const float scalar, unsigned length) { #ifdef __APPLE__ // Use the Accelerate framework if we have it vDSP_vsmul(in1, 1, &scalar,dest, 1, length); #else // Otherwise do it manually unsigned i; const unsigned end = 4 * (length / 4); for (i = 0; i < end; i+=4) { dest[i] = in1[i] * scalar; dest[i + 1] = in1[i + 1] * scalar; dest[i + 2] = in1[i + 2] * scalar; dest[i + 3] = in1[i + 3] * scalar; } for (i = end; i < length; ++i) { dest[i] = in1[i] * scalar; } #endif return NOERR; }
/* Inverse real FFT of this frame's split-complex spectrum into `data`
 * (m_FFTSize interleaved time-domain samples). The 0.5/m_FFTSize factor
 * compensates for vDSP_fft_zrip's scaling so that x == IFFT(FFT(x)). */
void FFTFrame::doInverseFFT(float* data)
{
    vDSP_fft_zrip(m_FFTSetup, &m_frame, 1, m_log2FFTSize, FFT_INVERSE);
    // Unpack m_FFTSize/2 split-complex values into m_FFTSize interleaved floats.
    vDSP_ztoc(&m_frame, 1, (DSPComplex*)data, 2, m_FFTSize / 2);

    // Do final scaling so that x == IFFT(FFT(x))
    float scale = 0.5f / m_FFTSize;
    vDSP_vsmul(data, 1, &scale, data, 1, m_FFTSize);
}
/* Copies `num` floats from src to dest while multiplying each by `multiplier`
 * (dest[i] = src[i] * multiplier). On Apple platforms this delegates to
 * vDSP_vsmul; otherwise JUCE's SIMD macro expands to an equivalent
 * vectorized loop using Mode::mul with the multiplier broadcast via load1. */
void JUCE_CALLTYPE FloatVectorOperations::copyWithMultiply (float* dest, const float* src, float multiplier, int num) noexcept
{
   #if JUCE_USE_VDSP_FRAMEWORK
    vDSP_vsmul (src, 1, &multiplier, dest, 1, num);
   #else
    JUCE_PERFORM_VEC_OP_SRC_DEST (dest[i] = src[i] * multiplier, Mode::mul (mult, s), JUCE_LOAD_SRC, JUCE_INCREMENT_SRC_DEST, const Mode::ParallelType mult = Mode::load1 (multiplier);)
   #endif
}
/* Computes the phase spectrum in DEGREES: runs siglab_cbPhase (radians) and
 * scales the first nfft/2 results by 180/pi. */
void siglab_cbPhaseDeg(float *src, float *dst, int nfft)
{
    // requires dst to be nel/2+1 element or more
    // src is in separated Re and Im arrays, A(Im) = A(Re) + NFFT/2
    // atan2f(1,1) == pi/4, so phsf == 45/(pi/4) == 180/pi (radians -> degrees).
    static float phsf = 45.0f/atan2f(1.0f,1.0f);
    siglab_cbPhase(src,dst,nfft);
    vDSP_vsmul(dst,1,&phsf,dst,1,nfft/2);
}
/* Forward FFT of one frame: windows audioBuffer in place, packs it into the
 * frame's split-complex buffer, transforms, normalizes, and zeroes the packed
 * Nyquist slot. Results are left in frame->buffer (split-complex). */
void fft(FFT_FRAME* frame, float* audioBuffer)
{
    FFT* fft = frame->fft;

    // Apply the analysis window FIRST.
    // BUG FIX: the original packed the samples into frame->buffer with
    // vDSP_ctoz *before* multiplying by the window, so the windowing modified
    // audioBuffer but had no effect on the data actually transformed.
    if (fft->window != NULL)
        vDSP_vmul(audioBuffer, 1, fft->window, 1, audioBuffer, 1, fft->size);

    // Pack the (now windowed) samples into even/odd split-complex layout.
    vDSP_ctoz((COMPLEX*)audioBuffer, 2, &frame->buffer, 1, fft->sizeOverTwo);

    // Actually perform the fft
    vDSP_fft_zrip(fft->fftSetup, &frame->buffer, 1, fft->logTwo, FFT_FORWARD);

    // Undo vDSP's implicit scaling (see vDSP_fft_zrip docs).
    vDSP_vsmul(frame->buffer.realp, 1, &fft->normalize, frame->buffer.realp, 1, fft->sizeOverTwo);
    vDSP_vsmul(frame->buffer.imagp, 1, &fft->normalize, frame->buffer.imagp, 1, fft->sizeOverTwo);

    // Zero out the packed Nyquist value (stored in imagp[0] by the real FFT).
    frame->buffer.imagp[0] = 0.0;
}
// ------------------------------------------------- // FFT post-processing // void siglab_sbDB(float *src, float *dst, int nfft) { // requires src to be power instead of amplitude // already processed for pwr from complex format. static float dbsf = 10.0f/logf(10.0f); int nfft2 = nfft/2; float *pdst = dst; for(int ix = nfft2; --ix >= 0;) *pdst++ = logf(*src++); vDSP_vsmul(dst,1,&dbsf,dst,1,nfft2); }
/* Forward FFT of inAudioData into a dB spectrum in outFFTData, then scans the
 * spectrum for its loudest bin; if the peak exceeds -40 dB its index is stored
 * in recentMaxIndex (otherwise 0). The -40 dB gate filters out anything
 * unlikely to be the microwave beep this class is listening for. */
void FFTHelper::ComputeFFT(Float32* inAudioData, Float32* outFFTData)
{
    if (inAudioData == NULL || outFFTData == NULL) return;

    //Generate a split complex vector from the real data
    vDSP_ctoz((COMPLEX *)inAudioData, 2, &mDspSplitComplex, 1, mFFTLength);

    //Take the fft and scale appropriately
    vDSP_fft_zrip(mSpectrumAnalysis, &mDspSplitComplex, 1, mLog2N, kFFTDirection_Forward);
    vDSP_vsmul(mDspSplitComplex.realp, 1, &mFFTNormFactor, mDspSplitComplex.realp, 1, mFFTLength);
    vDSP_vsmul(mDspSplitComplex.imagp, 1, &mFFTNormFactor, mDspSplitComplex.imagp, 1, mFFTLength);

    //Zero out the nyquist value (packed in imagp[0] by the real FFT)
    mDspSplitComplex.imagp[0] = 0.0;

    //Convert the fft data to squared magnitudes
    vDSP_zvmags(&mDspSplitComplex, 1, outFFTData, 1, mFFTLength);

    //In order to avoid taking log10 of zero, an adjusting factor is added in to make the minimum value equal -128dB
    vDSP_vsadd(outFFTData, 1, &kAdjust0DB, outFFTData, 1, mFFTLength);
    Float32 one = 1;
    vDSP_vdbcon(outFFTData, 1, &one, outFFTData, 1, mFFTLength, 0);

    // Linear scan for the loudest bin.
    Float32 max = -100;
    int index = -1;
    for(unsigned long i = 0; i < mFFTLength; i++){
        if(outFFTData[i] > max){
            max = outFFTData[i];
            index = i;
        }
    }
    if(max > -40){ // Filter out anything else, as it is unlikely to be the microwave beep
        recentMaxIndex = index;
        //if(index == 181){ // We found the microwave beep
        //printf("%d %f\n", index, max);
    }else{
        recentMaxIndex = 0;
    }
}
/* In-place variant of fft(): windows audioBuffer, then transforms it using the
 * same storage for input and split-complex output.
 *
 * NOTE(review): reinterpreting the raw sample buffer as a COMPLEX_SPLIT (a
 * struct of two POINTERS) is highly suspect — vDSP expects realp/imagp to
 * point at storage, not to BE sample data, so the first bytes of audioBuffer
 * are read as pointer values here. The original author's own comment below
 * flags the same doubt. Compare with the out-of-place fft() before trusting
 * results from this path. */
void fftIp(FFT* fftObject, float* audioBuffer)
{
    // Creating pointer of COMPLEX_SPLIT to use in calculations (points to same data as audioBuffer)
    COMPLEX_SPLIT* fftBuffer = (COMPLEX_SPLIT *)&audioBuffer[0];

    // Apply windowing
    if (fftObject->window != NULL)
        vDSP_vmul(audioBuffer, 1, fftObject->window, 1, audioBuffer, 1, fftObject->size);

    // This seems correct-ish TODO: check casting
    vDSP_ctoz((COMPLEX*)audioBuffer, 2, fftBuffer, 1, fftObject->sizeOverTwo);

    // Perform fft
    vDSP_fft_zrip(fftObject->fftSetup, fftBuffer, 1, fftObject->logTwo, FFT_FORWARD);

    // Do scaling (undo vDSP's implicit factor)
    vDSP_vsmul(fftBuffer->realp, 1, &fftObject->normalize, fftBuffer->realp, 1, fftObject->sizeOverTwo);
    vDSP_vsmul(fftBuffer->imagp, 1, &fftObject->normalize, fftBuffer->imagp, 1, fftObject->sizeOverTwo);

    // zero out DC offset (packed Nyquist slot)
    fftBuffer->imagp[0] = 0.0;
}
/* Forward FFT of inAudioData producing a dB power spectrum in outFFTData.
 * Pipeline: pack -> real FFT -> normalize -> zero packed Nyquist ->
 * squared magnitudes -> offset (to avoid log10(0)) -> dB conversion.
 * Uses members: mDspSplitComplex, mSpectrumAnalysis, mLog2N, mFFTNormFactor,
 * mFFTLength, and the kAdjust0DB offset constant. */
void FFTHelper::ComputeFFT(Float32* inAudioData, Float32* outFFTData)
{
    if (inAudioData == NULL || outFFTData == NULL) return;

    //Generate a split complex vector from the real data
    vDSP_ctoz((COMPLEX *)inAudioData, 2, &mDspSplitComplex, 1, mFFTLength);

    //Take the fft and scale appropriately
    vDSP_fft_zrip(mSpectrumAnalysis, &mDspSplitComplex, 1, mLog2N, kFFTDirection_Forward);
    vDSP_vsmul(mDspSplitComplex.realp, 1, &mFFTNormFactor, mDspSplitComplex.realp, 1, mFFTLength);
    vDSP_vsmul(mDspSplitComplex.imagp, 1, &mFFTNormFactor, mDspSplitComplex.imagp, 1, mFFTLength);

    //Zero out the nyquist value (packed in imagp[0] by the real FFT)
    mDspSplitComplex.imagp[0] = 0.0;

    //Convert the fft data to squared magnitudes
    vDSP_zvmags(&mDspSplitComplex, 1, outFFTData, 1, mFFTLength);

    //In order to avoid taking log10 of zero, an adjusting factor is added in to make the minimum value equal -128dB
    vDSP_vsadd(outFFTData, 1, &kAdjust0DB, outFFTData, 1, mFFTLength);
    // Flag 0 selects power dB (10*log10) with `one` as the 0 dB reference.
    Float32 one = 1;
    vDSP_vdbcon(outFFTData, 1, &one, outFFTData, 1, mFFTLength, 0);
}
/* Multiplies this frame's spectrum by `frame`'s, in place (used for
 * frequency-domain convolution). The DC and Nyquist terms are packed together
 * in element 0 of the real/imag arrays, so they are handled as two real
 * multiplies and the complex multiply starts at element 1. */
void FFTFrame::multiply(const FFTFrame& frame)
{
    FFTFrame& frame1 = *this;
    const FFTFrame& frame2 = frame;

    float* realP1 = frame1.realData();
    float* imagP1 = frame1.imagData();
    const float* realP2 = frame2.realData();
    const float* imagP2 = frame2.imagData();

    // Scale accounts for vecLib's peculiar scaling
    // This ensures the right scaling all the way back to inverse FFT
    float scale = 0.5f;

    // Multiply packed DC/nyquist component (purely real, stored in slot 0)
    realP1[0] *= scale * realP2[0];
    imagP1[0] *= scale * imagP2[0];

    // Multiply the rest, skipping packed DC/Nyquist components
    DSPSplitComplex sc1 = frame1.dspSplitComplex();
    sc1.realp++;
    sc1.imagp++;
    DSPSplitComplex sc2 = frame2.dspSplitComplex();
    sc2.realp++;
    sc2.imagp++;

    unsigned halfSize = m_FFTSize / 2;

    // Complex multiply of bins 1 .. halfSize-1
    vDSP_zvmul(&sc1, 1, &sc2, 1, &sc1, 1, halfSize - 1, 1 /* normal multiplication */);

    // We've previously scaled the packed part, now scale the rest.....
    vDSP_vsmul(sc1.realp, 1, &scale, sc1.realp, 1, halfSize - 1);
    vDSP_vsmul(sc1.imagp, 1, &scale, sc1.imagp, 1, halfSize - 1);
}
void fft::inversePowerSpectrum_vdsp(int start, float *finalOut, float *window, float *magnitude,float *phase) { uint32_t i; for (i = 0; i < half; i++) { // polar[2*i] = pow(10.0, magnitude[i] / 20.0) - 1.0; polar[2*i] = magnitude[i] - 1.0; polar[2*i + 1] = phase[i]; } vDSP_rect(polar, 2, in_real, 2, half); vDSP_ctoz((COMPLEX*) in_real, 2, &A, 1, half); vDSP_fft_zrip(setupReal, &A, 1, log2n, FFT_INVERSE); vDSP_ztoc(&A, 1, (COMPLEX*) out_real, 2, half); static float scale = 1./n; vDSP_vsmul(out_real, 1, &scale, out_real, 1, n); //multiply by window vDSP_vmul(out_real, 1, window, 1, finalOut, 1, n); }
/* result[k] = array[k] * scalar for k in [0, length).
 * Thin unit-stride wrapper over vDSP_vsmul; `array` and `result` may be the
 * same buffer (vDSP_vsmul supports in-place operation). */
void mul( const float *array, float scalar, float *result, size_t length )
{
    vDSP_vsmul( array, 1, &scalar, result, 1, length );
}
/* dst[k] = src[k] * kval for k in [0, nel).
 * Unit-stride scalar multiply; src and dst may alias for in-place scaling. */
void siglab_sbMpy1(float kval, float *src, float *dst, int nel)
{
    vDSP_vsmul(src, 1, &kval, dst, 1, nel);
}
/* Drives one render cycle of an iTunes visualizer plugin.
 *
 * When not idling: FFTs both audio channels (512 samples each), fills the
 * RenderVisualData waveform (0-255, centered at 128) and spectrum buffers,
 * and attaches them to the render message. Then it swaps in the plugin's GL
 * context, dispatches the appropriate plugin messages (idle or
 * render/set-position, followed by update), reads back the rendered pixels,
 * and restores the previous GL context.
 *
 * Returns the pixel format produced by get_pixels() (RGB24/RGBA32), or
 * ITunesPixelFormatUnknown on failure / missing handler.
 *
 * NOTE(review): `fft_zrip` is presumably a local wrapper around
 * vDSP_fft_zrip — confirm; `freq_data` is accepted but unused here. */
ITunesPixelFormat ivis_render( ITunesVis* plugin, short audio_data[][512], float freq_data[][512], void* buffer, long buffer_size, bool idle )
{
  ITunesPixelFormat format = ITunesPixelFormatUnknown;

  /* make sure we have a plugin and a visual handler */
  if ( !plugin || !plugin->imports.visual_handler )
    return format;

  int i=0, w=0;
  RenderVisualData visual_data;
  DSPSplitComplex splitComplex[2];
  float *data[2];

  /* perform FFT if we're not idling */
  if ( ! idle )
  {
    /* allocate some complex vars */
    for ( i = 0 ; i < 2 ; i++ )
    {
      splitComplex[i].realp = calloc( 512, sizeof(float) );
      splitComplex[i].imagp = calloc( 512, sizeof(float) );
      data[i] = calloc( 512, sizeof(float) );
    }

    /* 2 channels for spectrum and waveform data */
    visual_data.numWaveformChannels = 2;
    visual_data.numSpectrumChannels = 2;

    /* copy spectrum audio data to visual data strucure */
    for ( w = 0 ; w < 512 ; w++ )
    {
      /* iTunes visualizers expect waveform data from 0 - 255, with level 0 at 128 */
      visual_data.waveformData[0][w] = (UInt8)( (long)(audio_data[0][w]) / 128 + 128 );
      visual_data.waveformData[1][w] = (UInt8)( (long)(audio_data[1][w]) / 128 + 128 );

      /* scale to -1, +1 */
      *( data[0] + w ) = (float)(( audio_data[0][w]) / (2.0 * 8192.0) );
      *( data[1] + w ) = (float)(( audio_data[1][w]) / (2.0 * 8192.0) );
    }

    /* FFT scaler */
    float scale = ( 1.0 / 1024.0 ) ; /* scale by length of input * 2 (due to how vDSP does FFTs) */
    float nyq=0, dc=0, freq=0;

    for ( i = 0 ; i < 2 ; i++ )
    {
      /* pack data into format fft_zrip expects it */
      vDSP_ctoz( (COMPLEX*)( data[i] ), 2, &( splitComplex[i] ), 1, 256 );

      /* perform FFT on normalized audio data (2^9 = 512 points) */
      fft_zrip( plugin->fft_setup, &( splitComplex[i] ), 1, 9, FFT_FORWARD );

      /* scale the values */
      vDSP_vsmul( splitComplex[i].realp, 1, &scale, splitComplex[i].realp, 1, 256 );
      vDSP_vsmul( splitComplex[i].imagp, 1, &scale, splitComplex[i].imagp, 1, 256 );

      /* unpack data */
      vDSP_ztoc( &splitComplex[i], 1, (COMPLEX*)( data[i] ), 2, 256 );

      /* ignore phase; DC and Nyquist come packed in the first two slots */
      dc = *(data[i]) = fabs( *(data[i]) );
      nyq = fabs( *(data[i] + 1) );

      for ( w = 1 ; w < 256 ; w++ )
      {
        /* don't use vDSP for this since there's some overflow */
        freq = hypot( *(data[i] + w * 2), *(data[i] + w * 2 + 1) ) * 256 * 16;
        freq = MAX( 0, freq );
        freq = MIN( 255, freq );
        visual_data.spectrumData[i][ w - 1 ] = (UInt8)( freq );
      }
      visual_data.spectrumData[i][256] = nyq;
    }

    /* deallocate complex vars */
    for ( i = 0 ; i < 2 ; i++ )
    {
      free( splitComplex[i].realp );
      free( splitComplex[i].imagp );
      free( data[i] );
    }

    /* update the render message with the new visual data and timestamp */
    plugin->visual_message.u.renderMessage.renderData = &visual_data;
    plugin->visual_message.u.renderMessage.timeStampID++;
  }

  /* update time */
  plugin->visual_message.u.renderMessage.currentPositionInMS =
    ivis_current_time() - plugin->start_time; // FIXME: real time

  /* save our GL context and send the vis a render message */
  CGLContextObj currentContext = CGLGetCurrentContext();
  if ( plugin->gl_context )
    aglSetCurrentContext( (AGLContext)(plugin->gl_context ) );

  /* call the plugin's render method */
  if ( idle )
  {
    /* idle message */
    if ( plugin->wants_idle )
      plugin->imports.visual_handler( kVisualPluginIdleMessage, &( plugin->visual_message ), plugin->vis_ref );
  }
  else
  {
    /* render message */
    plugin->imports.visual_handler( kVisualPluginRenderMessage, &( plugin->visual_message ), plugin->vis_ref );

    /* set position message */
    plugin->visual_message.u.setPositionMessage.positionTimeInMS = plugin->visual_message.u.renderMessage.currentPositionInMS;
    plugin->imports.visual_handler( kVisualPluginSetPositionMessage, &( plugin->visual_message ), plugin->vis_ref );
  }

  /* update message */
  plugin->imports.visual_handler( kVisualPluginUpdateMessage, NULL, plugin->vis_ref );

  /* read pixels and restore our GL context */
  CGLLockContext( CGLGetCurrentContext() );

  switch ( get_pixels( buffer, buffer_size, CGLGetCurrentContext() != currentContext ) )
  {
  case 3:
    format = ITunesPixelFormatRGB24;
    break;
  case 4:
    format = ITunesPixelFormatRGBA32;
    break;
  default:
    break;
  }

  CGLUnlockContext ( CGLGetCurrentContext() );

  /* restore our GL context */
  CGLSetCurrentContext( currentContext );
  return format;
}
// apple bug static void Workaround_vsub(const float *a, int aStride, const float *b, int bStride, float *c, int cStride, int size ) { const float minusOne = -1.0f; vDSP_vsmul(a, aStride, &minusOne, c, cStride, size); vDSP_vadd( c, cStride, b, bStride, c, cStride, size ); }
/* If new audio is available: denoises the capture buffer, runs the forward
 * FFT, converts the spectrum to dB, scales to Q7.24 fixed point into
 * outFFTData, dumps a debug printout, and flips the has/needs-audio flags.
 * Returns true when a spectrum was produced, false otherwise. */
Boolean FFTBufferManager::ComputeFFT(int32_t *outFFTData)
{
	if (HasNewAudioData())
	{
		// Soft noise gate: pull every sample toward zero by the noise floor,
		// clamping at zero so noise-level samples are silenced.
		// BUG FIX: this constant was `#define NOISE_FILTER 0.01;` — a macro
		// with a trailing semicolon that only compiled by accident in the
		// subtraction/addition statements below.
		const Float32 kNoiseFloor = 0.01f;
		// TODO(review): 4096 is assumed to be the capture buffer length —
		// confirm its relationship to mFFTLength.
		for (int i = 0; i < 4096; i++)
		{
			if (mAudioBuffer[i] > 0)
			{
				mAudioBuffer[i] -= kNoiseFloor;
				if (mAudioBuffer[i] < 0)
					mAudioBuffer[i] = 0;
			}
			else if (mAudioBuffer[i] < 0)
			{
				mAudioBuffer[i] += kNoiseFloor;
				if (mAudioBuffer[i] > 0)
					mAudioBuffer[i] = 0;
			}
		}

		// Generate a split complex vector from the real data
		vDSP_ctoz((COMPLEX *)mAudioBuffer, 2, &mDspSplitComplex, 1, mFFTLength);

		// Take the fft and scale appropriately
		vDSP_fft_zrip(mSpectrumAnalysis, &mDspSplitComplex, 1, mLog2N, kFFTDirection_Forward);
		vDSP_vsmul(mDspSplitComplex.realp, 1, &mFFTNormFactor, mDspSplitComplex.realp, 1, mFFTLength);
		vDSP_vsmul(mDspSplitComplex.imagp, 1, &mFFTNormFactor, mDspSplitComplex.imagp, 1, mFFTLength);

		// Zero out the nyquist value (packed into imagp[0] by the real FFT)
		mDspSplitComplex.imagp[0] = 0.0;

		// Convert the fft data to dB
		Float32 tmpData[mFFTLength];  // NOTE(review): VLA — non-standard C++; OK for small mFFTLength
		vDSP_zvmags(&mDspSplitComplex, 1, tmpData, 1, mFFTLength);

		// In order to avoid taking log10 of zero, an adjusting factor is added
		// in to make the minimum value equal -128dB
		vDSP_vsadd(tmpData, 1, &mAdjust0DB, tmpData, 1, mFFTLength);
		Float32 one = 1;
		vDSP_vdbcon(tmpData, 1, &one, tmpData, 1, mFFTLength, 0);

		// Convert floating point data to integer (Q7.24)
		vDSP_vsmul(tmpData, 1, &m24BitFracScale, tmpData, 1, mFFTLength);
		for (UInt32 i = 0; i < mFFTLength; ++i)
			outFFTData[i] = (SInt32)tmpData[i];

		// Debug dump of the lower quarter of the spectrum (dB, re-offset).
		for (int i = 0; i < mFFTLength / 4; i++)
			printf("%i:::%i\n", i, outFFTData[i] / 16777216 + 120);

		// Hand the buffer back to the capture side.
		OSAtomicDecrement32Barrier(&mHasAudioData);
		OSAtomicIncrement32Barrier(&mNeedsAudioData);
		mAudioBufferCurrentIndex = 0;
		return true;
	}
	else if (mNeedsAudioData == 0)
		OSAtomicIncrement32Barrier(&mNeedsAudioData);

	return false;
}
/* Decodes the audio at `url`, converts it to non-interleaved 32-bit float at
 * the nearest ReplayGain-supported sample rate, tracks the peak sample
 * magnitude, and feeds 16-bit-scaled samples through the ReplayGain analyzer.
 * Only mono and stereo input at supported sample-rate multiples is accepted;
 * on rejection a descriptive CFError is produced (when `error` is non-null)
 * and false is returned. */
bool SFB::Audio::ReplayGainAnalyzer::AnalyzeURL(CFURLRef url, CFErrorRef *error)
{
	if(nullptr == url)
		return false;

	auto decoder = Decoder::CreateDecoderForURL(url, error);
	if(!decoder || !decoder->Open(error))
		return false;

	AudioStreamBasicDescription inputFormat = decoder->GetFormat();

	// Higher sampling rates aren't natively supported but are handled via resampling
	int32_t decoderSampleRate = (int32_t)inputFormat.mSampleRate;

	bool validSampleRate = EvenMultipleSampleRateIsSupported(decoderSampleRate);
	if(!validSampleRate) {
		if(error) {
			SFB::CFString description = CFCopyLocalizedString(CFSTR("The file “%@” does not contain audio at a supported sample rate."), "");
			SFB::CFString failureReason = CFCopyLocalizedString(CFSTR("Only sample rates of 8.0 KHz, 11.025 KHz, 12.0 KHz, 16.0 KHz, 22.05 KHz, 24.0 KHz, 32.0 KHz, 44.1 KHz, 48 KHz and multiples are supported."), "");
			SFB::CFString recoverySuggestion = CFCopyLocalizedString(CFSTR("The file's extension may not match the file's type."), "");
			*error = CreateErrorForURL(ReplayGainAnalyzer::ErrorDomain, ReplayGainAnalyzer::FileFormatNotSupportedError, description, url, failureReason, recoverySuggestion);
		}
		return false;
	}

	Float64 replayGainSampleRate = GetBestReplayGainSampleRateForSampleRate(decoderSampleRate);

	if(!(1 == inputFormat.mChannelsPerFrame || 2 == inputFormat.mChannelsPerFrame)) {
		if(error) {
			SFB::CFString description = CFCopyLocalizedString(CFSTR("The file “%@” does not contain mono or stereo audio."), "");
			SFB::CFString failureReason = CFCopyLocalizedString(CFSTR("Only mono or stereo files supported"), "");
			SFB::CFString recoverySuggestion = CFCopyLocalizedString(CFSTR("The file's extension may not match the file's type."), "");
			*error = CreateErrorForURL(ReplayGainAnalyzer::ErrorDomain, ReplayGainAnalyzer::FileFormatNotSupportedError, description, url, failureReason, recoverySuggestion);
		}
		return false;
	}

	// Analysis format: non-interleaved native float PCM at the ReplayGain rate.
	AudioStreamBasicDescription outputFormat = {
		.mFormatID = kAudioFormatLinearPCM,
		.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved,
		.mReserved = 0,
		.mSampleRate = replayGainSampleRate,
		.mChannelsPerFrame = inputFormat.mChannelsPerFrame,
		.mBitsPerChannel = 32,
		.mBytesPerPacket = 4,
		.mBytesPerFrame = 4,
		.mFramesPerPacket = 1
	};

	if(!SetSampleRate((int32_t)outputFormat.mSampleRate)) {
		if(error) {
			SFB::CFString description = CFCopyLocalizedString(CFSTR("The file “%@” does not contain audio at a supported sample rate."), "");
			SFB::CFString failureReason = CFCopyLocalizedString(CFSTR("Only sample rates of 8.0 KHz, 11.025 KHz, 12.0 KHz, 16.0 KHz, 22.05 KHz, 24.0 KHz, 32.0 KHz, 44.1 KHz, 48 KHz and multiples are supported."), "");
			SFB::CFString recoverySuggestion = CFCopyLocalizedString(CFSTR("The file's extension may not match the file's type."), "");
			*error = CreateErrorForURL(ReplayGainAnalyzer::ErrorDomain, ReplayGainAnalyzer::FileFormatNotSupportedError, description, url, failureReason, recoverySuggestion);
		}
		return false;
	}

	// Converter takes ownership of decoder
	Converter converter(std::move(decoder), outputFormat);
	if(!converter.Open(error))
		return false;

	const UInt32 bufferSizeFrames = 512;
	BufferList outputBuffer(outputFormat, bufferSizeFrames);

	bool isStereo = (2 == outputFormat.mChannelsPerFrame);

	for(;;) {
		UInt32 frameCount = converter.ConvertAudio(outputBuffer, bufferSizeFrames);
		if(0 == frameCount)
			break;

		// Find the peak sample magnitude
		float lpeak, rpeak;
		vDSP_maxmgv((const float *)outputBuffer->mBuffers[0].mData, 1, &lpeak, frameCount);
		if(isStereo) {
			vDSP_maxmgv((const float *)outputBuffer->mBuffers[1].mData, 1, &rpeak, frameCount);
			priv->trackPeak = std::max(priv->trackPeak, std::max(lpeak, rpeak));
		}
		else
			priv->trackPeak = std::max(priv->trackPeak, lpeak);

		// The replay gain analyzer expects 16-bit sample size passed as floats
		const float scale = 1u << 15;
		vDSP_vsmul((const float *)outputBuffer->mBuffers[0].mData, 1, &scale, (float *)outputBuffer->mBuffers[0].mData, 1, frameCount);
		if(isStereo) {
			vDSP_vsmul((const float *)outputBuffer->mBuffers[1].mData, 1, &scale, (float *)outputBuffer->mBuffers[1].mData, 1, frameCount);
			AnalyzeSamples((const float *)outputBuffer->mBuffers[0].mData, (const float *)outputBuffer->mBuffers[1].mData, frameCount, true);
		}
		else
			AnalyzeSamples((const float *)outputBuffer->mBuffers[0].mData, nullptr, frameCount, false);
	}

	priv->albumPeak = std::max(priv->albumPeak, priv->trackPeak);

	return true;
}

/* Finalizes the per-track result: folds the track accumulators (A) into the
 * album accumulators (B), then resets all per-track analysis state. */
bool SFB::Audio::ReplayGainAnalyzer::GetTrackGain(float& trackGain)
{
	if(!analyzeResult(priv->A, sizeof(priv->A) / sizeof(*(priv->A)), trackGain))
		return false;

	for(uint32_t i = 0; i < sizeof(priv->A) / sizeof(*(priv->A)); ++i) {
		priv->B[i] += priv->A[i];
		priv->A[i] = 0;
	}

	priv->Zero();
	priv->totsamp = 0;
	priv->lsum = priv->rsum = 0.;

	return true;
}

/* Returns the track's peak sample magnitude and resets it for the next track. */
bool SFB::Audio::ReplayGainAnalyzer::GetTrackPeak(float& trackPeak)
{
	trackPeak = priv->trackPeak;
	priv->trackPeak = 0.;
	return true;
}
/* Allocates and builds a bank of `filterCount` triangular filters on a
 * log-spaced frequency axis spanning 0..Nyquist.
 *
 * filterCount        : number of triangle filters
 * magnitudeFrameSize : number of spectrum bins each filter spans
 * samplerate         : sample rate, used to place the Nyquist bound
 *
 * The result's filterBank is a (magnitudeFrameSize x filterCount) matrix,
 * stored transposed for column-wise application to magnitude frames.
 * Caller owns the returned object. */
TriangleFilterBank32 *TriangleFilterBank32_new(size_t filterCount, size_t magnitudeFrameSize, size_t samplerate)
{
    TriangleFilterBank32 *self = calloc(1, sizeof(TriangleFilterBank32));
    self->filterCount = filterCount;
    self->filterFrequencyCount = filterCount + 2;  // band edges: one extra on each side
    self->magnitudeFrameSize = magnitudeFrameSize;
    self->samplerate = samplerate;
    self->filterFrequencies = calloc(self->filterFrequencyCount, sizeof(Float32));
    self->filterBank = calloc(self->magnitudeFrameSize * self->filterCount, sizeof(Float32));

    Float32 *filterTemp = calloc(self->magnitudeFrameSize * self->filterCount, sizeof(Float32));

    /* Log-spaced axis: ramp 0..2, then 10^x gives values in [1, 100].
     * (The original named this bound `one` while it held the value 2.) */
    Float32 rampEnd = 2;
    Float32 zero = 0;
    vDSP_vgen(&zero, &rampEnd, self->filterFrequencies, 1, self->filterFrequencyCount);
    for (size_t i = 0; i < self->filterFrequencyCount; ++i) {
        self->filterFrequencies[i] = powf(10, self->filterFrequencies[i]);
    }

    /* Shift to start at 0, normalize to [0, 1], then scale to [0, Nyquist]. */
    Float32 minusFirstElement = -self->filterFrequencies[0];
    vDSP_vsadd(self->filterFrequencies, 1, &minusFirstElement, self->filterFrequencies, 1, self->filterFrequencyCount);
    Float32 maximum = self->filterFrequencies[self->filterFrequencyCount - 1];
    vDSP_vsdiv(self->filterFrequencies, 1, &maximum, self->filterFrequencies, 1, self->filterFrequencyCount);
    Float32 nyquist = self->samplerate / 2;
    vDSP_vsmul(self->filterFrequencies, 1, &nyquist, self->filterFrequencies, 1, self->filterFrequencyCount);

    /* Linear frequency of each magnitude bin. */
    Float32 *magnitudeFrequencies = calloc(self->magnitudeFrameSize, sizeof(Float32));
    vDSP_vgen(&zero, &nyquist, magnitudeFrequencies, 1, self->magnitudeFrameSize);

    Float32 *lower = calloc(self->filterCount, sizeof(Float32));
    Float32 *center = calloc(self->filterCount, sizeof(Float32));
    Float32 *upper = calloc(self->filterCount, sizeof(Float32));
    Float32 *temp1 = (Float32 *)calloc(self->magnitudeFrameSize, sizeof(Float32));
    Float32 *temp2 = (Float32 *)calloc(self->magnitudeFrameSize, sizeof(Float32));

    /* Filter i spans edges (i, i+1, i+2) of the frequency axis. */
    cblas_scopy((SInt32)self->filterCount, &self->filterFrequencies[0], 1, lower, 1);
    cblas_scopy((SInt32)self->filterCount, &self->filterFrequencies[1], 1, center, 1);
    cblas_scopy((SInt32)self->filterCount, &self->filterFrequencies[2], 1, upper, 1);

    for (size_t i = 0; i < self->filterCount; ++i) {
        /* Rising edge: (f - lower) / (center - lower), nonzero on (lower, center]. */
        Float32 negateLowerValue = -lower[i];
        vDSP_vsadd(magnitudeFrequencies, 1, &negateLowerValue, temp1, 1, self->magnitudeFrameSize);
        Float32 divider = center[i] - lower[i];
        /* BUG FIX: source contained the mojibake token `÷r` here (a
         * mis-decoded HTML entity) — the argument must be `&divider`. */
        vDSP_vsdiv(temp1, 1, &divider, temp1, 1, self->magnitudeFrameSize);
        for (size_t j = 0; j < self->magnitudeFrameSize; ++j) {
            if (!(magnitudeFrequencies[j] > lower[i] && magnitudeFrequencies[j] <= center[i])) {
                temp1[j] = 0;
            }
        }

        /* Falling edge: (upper - f) / (upper - center), nonzero on (center, upper]. */
        Float32 minusOne = -1;
        vDSP_vsmul(magnitudeFrequencies, 1, &minusOne, temp2, 1, self->magnitudeFrameSize);
        vDSP_vsadd(temp2, 1, &upper[i], temp2, 1, self->magnitudeFrameSize);
        divider = upper[i] - center[i];
        vDSP_vsdiv(temp2, 1, &divider, temp2, 1, self->magnitudeFrameSize);
        for (size_t j = 0; j < self->magnitudeFrameSize; ++j) {
            if (!(magnitudeFrequencies[j] > center[i] && magnitudeFrequencies[j] <= upper[i])) {
                temp2[j] = 0;
            }
        }

        /* Triangle = rising edge + falling edge. */
        vDSP_vadd(temp1, 1, temp2, 1, &filterTemp[i * self->magnitudeFrameSize], 1, self->magnitudeFrameSize);
    }

    /* Transpose into the layout expected by the apply step. */
    vDSP_mtrans(filterTemp, 1, self->filterBank, 1, self->magnitudeFrameSize, self->filterCount);

    free(lower);
    free(center);
    free(upper);
    free(temp1);
    free(temp2);
    free(magnitudeFrequencies);
    free(filterTemp);
    return self;
}
inline void maxiCollider::createGabor(flArr &atom, const float freq, const float sampleRate, const uint length, float startPhase, const float kurtotis, const float amp) { atom.resize(length); flArr sine; sine.resize(length); // float gausDivisor = (-2.0 * kurtotis * kurtotis); // float phase =-1.0; double *env = maxiCollider::envCache.getWindow(length); #ifdef __APPLE_CC__ vDSP_vdpsp(env, 1, &atom[0], 1, length); #else for(uint i=0; i < length; i++) { atom[i] = env[i]; } #endif //#ifdef __APPLE_CC__ // vDSP_vramp(&phase, &inc, &atom[0], 1, length); // vDSP_vsq(&atom[0], 1, &atom[0], 1, length); // vDSP_vsdiv(&atom[0], 1, &gausDivisor, &atom[0], 1, length); // for(uint i=0; i < length; i++) atom[i] = exp(atom[i]); //#else // for(uint i=0; i < length; i++) { // //gaussian envelope // atom[i] = exp((phase* phase) / gausDivisor); // phase += inc; // } //#endif float cycleLen = sampleRate / freq; float maxPhase = length / cycleLen; float inc = 1.0 / length; #ifdef __APPLE_CC__ flArr interpConstants; interpConstants.resize(length); float phase = 0.0; vDSP_vramp(&phase, &inc, &interpConstants[0], 1, length); vDSP_vsmsa(&interpConstants[0], 1, &maxPhase, &startPhase, &interpConstants[0], 1, length); float waveTableLength = 512; vDSP_vsmul(&interpConstants[0], 1, &waveTableLength, &interpConstants[0], 1, length); for(uint i=0; i < length; i++) { interpConstants[i] = fmod(interpConstants[i], 512.0f); } vDSP_vlint(sineBuffer2, &interpConstants[0], 1, &sine[0], 1, length, 514); vDSP_vmul(&atom[0], 1, &sine[0], 1, &atom[0], 1, length); vDSP_vsmul(&atom[0], 1, &, &atom[0], 1, length); #else maxPhase *= TWOPI; for(uint i=0; i < length; i++) { //multiply by sinewave float x = inc * i; sine[i] = sin((x * maxPhase) + startPhase); } for(uint i=0; i < length; i++) { atom[i] *= sine[i]; atom[i] *= amp; } #endif }