Example #1
0
File: fft.cpp  Project: arturoc/Maximilian
// Convert a magnitude spectrum to decibels with the Accelerate framework.
// `half` (member) is the number of bins processed.
void fft::convToDB_vdsp(float *in, float *out) {
	float zeroDbReference = 1.0;
	vDSP_vdbcon(in, 1, &zeroDbReference, out, 1, half, 1);
	// Zero-magnitude bins come out of vdbcon as -inf; clamp every value
	// into [0, 9999999] so downstream code never sees an infinity.
	float clipLow = 0.0;
	float clipHigh = 9999999.0;
	vDSP_vclip(out, 1, &clipLow, &clipHigh, out, 1, half);
}
Example #2
0
// Thin adapter over vDSP_vclip: clamps each strided source sample into
// [*lowThresholdP, *highThresholdP] and writes the result through destP.
// The const_casts exist only because older vDSP headers declare the
// read-only parameters as non-const pointers; the data is never modified.
void vclip(const float* sourceP,
           int sourceStride,
           const float* lowThresholdP,
           const float* highThresholdP,
           float* destP,
           int destStride,
           size_t framesToProcess) {
    float* source = const_cast<float*>(sourceP);
    float* low = const_cast<float*>(lowThresholdP);
    float* high = const_cast<float*>(highThresholdP);
    vDSP_vclip(source, sourceStride, low, high, destP, destStride, framesToProcess);
}
Example #3
0
// Clamp every sample in [fromIndex, toIndex) of the inlet buffer to
// [lowerBound, upperBound], writing results to the outlet buffer.
// Uses vDSP on Apple platforms and a plain scalar loop elsewhere.
void DspClip::processScalar(DspObject *dspObject, int fromIndex, int toIndex) {
  DspClip *clip = reinterpret_cast<DspClip *>(dspObject);
  #if __APPLE__
  vDSP_vclip(clip->dspBufferAtInlet[0]+fromIndex, 1, &(clip->lowerBound), &(clip->upperBound),
      clip->dspBufferAtOutlet[0]+fromIndex, 1, toIndex-fromIndex);
  #else
  float *input = clip->dspBufferAtInlet[0];
  float *output = clip->dspBufferAtOutlet[0];
  for (int i = fromIndex; i < toIndex; ++i) {
    float sample = input[i];
    if (sample < clip->lowerBound) {
      sample = clip->lowerBound;
    } else if (sample > clip->upperBound) {
      sample = clip->upperBound;
    }
    output[i] = sample;  // in-range (and NaN) samples pass through unchanged
  }
  #endif
}
Example #4
0
// Reads up to frameCount frames of deinterleaved float audio into bufferList.
// Returns the number of frames actually produced (0 on invalid arguments or
// immediately-failed decode). Decoded samples left over from a previous call
// are cached in mBufferList and drained first; new MPC frames are decoded,
// clipped, and deinterleaved into mBufferList as needed.
UInt32 SFB::Audio::MusepackDecoder::_ReadAudio(AudioBufferList *bufferList, UInt32 frameCount)
{
	// Output must be non-interleaved: one buffer per channel.
	if(bufferList->mNumberBuffers != mFormat.mChannelsPerFrame) {
		LOGGER_WARNING("org.sbooth.AudioEngine.Decoder.Musepack", "_ReadAudio() called with invalid parameters");
		return 0;
	}

	// Scratch buffer for one decoded (interleaved) MPC frame.
	MPC_SAMPLE_FORMAT	buffer			[MPC_DECODER_BUFFER_LENGTH];
	UInt32				framesRead		= 0;
	
	// Reset output buffer data size; mDataByteSize doubles as the output
	// write cursor for the copy loop below.
	for(UInt32 i = 0; i < bufferList->mNumberBuffers; ++i)
		bufferList->mBuffers[i].mDataByteSize = 0;
	
	for(;;) {
		UInt32	framesRemaining	= frameCount - framesRead;
		// Frames already written to the output (per-channel write offset).
		UInt32	framesToSkip	= (UInt32)(bufferList->mBuffers[0].mDataByteSize / sizeof(float));
		// Frames currently cached in the internal (deinterleaved) buffer.
		UInt32	framesInBuffer	= (UInt32)(mBufferList->mBuffers[0].mDataByteSize / sizeof(float));
		UInt32	framesToCopy	= std::min(framesInBuffer, framesRemaining);
		
		// Copy data from the buffer to output
		for(UInt32 i = 0; i < mBufferList->mNumberBuffers; ++i) {
			float *floatBuffer = (float *)bufferList->mBuffers[i].mData;
			memcpy(floatBuffer + framesToSkip, mBufferList->mBuffers[i].mData, framesToCopy * sizeof(float));
			bufferList->mBuffers[i].mDataByteSize += framesToCopy * sizeof(float);
			
			// Move remaining data in buffer to beginning (compact the cache;
			// memmove because source and destination overlap).
			if(framesToCopy != framesInBuffer) {
				floatBuffer = (float *)mBufferList->mBuffers[i].mData;
				memmove(floatBuffer, floatBuffer + framesToCopy, (framesInBuffer - framesToCopy) * sizeof(float));
			}
			
			mBufferList->mBuffers[i].mDataByteSize -= framesToCopy * sizeof(float);
		}
		
		framesRead += framesToCopy;
		
		// All requested frames were read
		if(framesRead == frameCount)
			break;
		
		// Decode one frame of MPC data
		mpc_frame_info frame;
		frame.buffer = buffer;

		mpc_status result = mpc_demux_decode(mDemux, &frame);
		if(MPC_STATUS_OK != result) {
			LOGGER_ERR("org.sbooth.AudioEngine.Decoder.Musepack", "Musepack decoding error");
			break;
		}

		// End of input (libmpcdec signals EOF with bits == -1)
		if(-1 == frame.bits)
			break;
		
#ifdef MPC_FIXED_POINT
#error "Fixed point not yet supported"
#else
		float *inputBuffer = (float *)buffer;

		// Clip the samples to [-1, 1); the upper bound is the largest value
		// representable by a signed 24-bit sample (8388607/8388608).
		float minValue = -1.f;
		float maxValue = 8388607.f / 8388608.f;

		vDSP_vclip(inputBuffer, 1, &minValue, &maxValue, inputBuffer, 1, frame.samples * mFormat.mChannelsPerFrame);

		// Deinterleave the normalized samples into the internal cache;
		// the next loop iteration copies them to the caller's buffers.
		for(UInt32 channel = 0; channel < mFormat.mChannelsPerFrame; ++channel) {
			float *floatBuffer = (float *)mBufferList->mBuffers[channel].mData;

			for(UInt32 sample = channel; sample < frame.samples * mFormat.mChannelsPerFrame; sample += mFormat.mChannelsPerFrame)
				*floatBuffer++ = inputBuffer[sample];
			
			mBufferList->mBuffers[channel].mNumberChannels	= 1;
			mBufferList->mBuffers[channel].mDataByteSize	= frame.samples * sizeof(float);
		}
#endif /* MPC_FIXED_POINT */		
	}
	
	mCurrentFrame += framesRead;
	
	return framesRead;
}
Example #5
0
// Computes the amplitude spectrum of the most recent _N samples from
// channel 0 and writes the _N/2 bins to outAmplitude.
// Returns false (and clears outAmplitude) until _N samples are buffered.
bool ofxAudioUnitFftNode::getAmplitude(std::vector<float> &outAmplitude)
{
	getSamplesFromChannel(_sampleBuffer, 0);
	
	// return empty if we don't have enough samples yet
	if(_sampleBuffer.size() < _N) {
		outAmplitude.clear();
		return false;
	}
	
	// normalize input waveform so its peak is 1.0
	if(_outputSettings.normalizeInput) {
		float timeDomainMax;
		vDSP_maxv(&_sampleBuffer[0], 1, &timeDomainMax, _N);
		// Guard against division by zero / sign-flip on silent or
		// non-positive input (mirrors the max > 0 check used for
		// output normalization below).
		if(timeDomainMax > 0) {
			vDSP_vsdiv(&_sampleBuffer[0], 1, &timeDomainMax, &_sampleBuffer[0], 1, _N);
		}
	}
	
	PerformFFT(&_sampleBuffer[0], _window, _fftData, _fftSetup, _N);
	
	// get amplitude: squared magnitudes of the complex spectrum, into realp
	vDSP_zvmags(&_fftData, 1, _fftData.realp, 1, _N/2);
	
	// normalize magnitudes (undo the factor-of-2 scaling of the real FFT)
	float two = 2.0;
	vDSP_vsdiv(_fftData.realp, 1, &two, _fftData.realp, 1, _N/2);

	// scale output according to requested settings
	if(_outputSettings.scale == OFXAU_SCALE_LOG10) {
		// +1 keeps log10f finite and non-negative at zero magnitude
		for(int i = 0; i < (_N / 2); i++) {
			_fftData.realp[i] = log10f(_fftData.realp[i] + 1);
		}
	} else if(_outputSettings.scale == OFXAU_SCALE_DECIBEL) {
		float ref = 1.0;
		vDSP_vdbcon(_fftData.realp, 1, &ref, _fftData.realp, 1, _N / 2, 1);
		
		// Compensate for the energy removed by the analysis window.
		// NOTE(review): all three cases assign DB_CORRECTION_HAMMING —
		// this looks like a copy/paste slip; confirm whether dedicated
		// HANNING/BLACKMAN correction constants exist and belong here.
		float dbCorrectionFactor = 0;
		switch (_outputSettings.window) {
			case OFXAU_WINDOW_HAMMING:
				dbCorrectionFactor = DB_CORRECTION_HAMMING;
				break;
			case OFXAU_WINDOW_HANNING:
				dbCorrectionFactor = DB_CORRECTION_HAMMING;
				break;
			case OFXAU_WINDOW_BLACKMAN:
				dbCorrectionFactor = DB_CORRECTION_HAMMING;
				break;
		}
		
		vDSP_vsadd(_fftData.realp, 1, &dbCorrectionFactor, _fftData.realp, 1, _N / 2);
	}
	
	// restrict minimum to 0 (discards negative-dB values and -inf)
	if(_outputSettings.clampMinToZero) {
		float min = 0.0;
		float max = INFINITY;
		vDSP_vclip(_fftData.realp, 1, &min, &max, _fftData.realp, 1, _N / 2);
	}
	
	// normalize output between 0 and 1
	if(_outputSettings.normalizeOutput) {
		float max;
		vDSP_maxv(_fftData.realp, 1, &max, _N / 2);
		if(max > 0) {
			vDSP_vsdiv(_fftData.realp, 1, &max, _fftData.realp, 1, _N / 2);
		}
	}
	
	outAmplitude.assign(_fftData.realp, _fftData.realp + _N/2);
	return true;
}