Example #1
/* Calculate the power spectrum */
void fft::powerSpectrum_vdsp(int start, float *data, float *window, float *magnitude, float *phase) {
	
    uint32_t        i;
	
	//multiply by window
	vDSP_vmul(data, 1, window, 1, in_real, 1, n);

	//convert to split complex format - evens and odds
	vDSP_ctoz((COMPLEX *) in_real, 2, &A, 1, half);
	
	
	//calc fft
	vDSP_fft_zrip(setupReal, &A, 1, log2n, FFT_FORWARD);
	
	//scale by 1/2 (vDSP's forward real FFT results come back scaled by 2; see vDSP docs)
	static float scale = 0.5;
	vDSP_vsmul(A.realp, 1, &scale, A.realp, 1, half);
	vDSP_vsmul(A.imagp, 1, &scale, A.imagp, 1, half);

	//convert back from split complex to interleaved format
	vDSP_ztoc(&A, 1, (COMPLEX*) out_real, 2, half);
	
	//convert to polar
	vDSP_polar(out_real, 2, polar, 2, half);
	
	for (i = 0; i < half; i++) {
		magnitude[i]=polar[2*i]+1.0;
		phase[i]=polar[2*i + 1];
	}
}
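The two fft:: member functions shown here (and in Example #5 below) rely on member state that is not shown: setupReal, in_real, out_real, polar, A, n, half and log2n. A minimal setup sketch, assuming a power-of-two FFT size; the actual class may allocate and name these differently.

#include <Accelerate/Accelerate.h>
#include <cmath>
#include <cstdlib>

class fft {
public:
    explicit fft(int fftSize)
        : n(fftSize), half(fftSize / 2),
          log2n((vDSP_Length) std::log2((double) fftSize))
    {
        setupReal = vDSP_create_fftsetup(log2n, FFT_RADIX2);
        in_real   = (float *) calloc(n, sizeof(float));
        out_real  = (float *) calloc(n, sizeof(float));
        polar     = (float *) calloc(n, sizeof(float));   // interleaved magnitude/phase
        A.realp   = (float *) calloc(half, sizeof(float));
        A.imagp   = (float *) calloc(half, sizeof(float));
    }

    ~fft()
    {
        vDSP_destroy_fftsetup(setupReal);
        free(in_real); free(out_real); free(polar);
        free(A.realp); free(A.imagp);
    }

    void powerSpectrum_vdsp(int start, float *data, float *window,
                            float *magnitude, float *phase);
    void inversePowerSpectrum_vdsp(int start, float *finalOut, float *window,
                                   float *magnitude, float *phase);

private:
    vDSP_Length n, half, log2n;     // FFT size, n/2, and log2(n)
    FFTSetup setupReal;             // twiddle factors shared by forward/inverse
    float *in_real, *out_real, *polar;
    COMPLEX_SPLIT A;                // split-complex working buffer (half entries each)
};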
Example #2
void FFTFrame::doInverseFFT(float* data) {
  vDSP_fft_zrip(m_FFTSetup, &m_frame, 1, m_log2FFTSize, FFT_INVERSE);
  vDSP_ztoc(&m_frame, 1, (DSPComplex*)data, 2, m_FFTSize / 2);

  // Do final scaling so that x == IFFT(FFT(x))
  float scale = 1.0f / m_FFTSize;
  VectorMath::vsmul(data, 1, &scale, data, 1, m_FFTSize);
}
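For context on the scaling above: vDSP's zrip transforms are unnormalized, so a forward pass followed by an unscaled inverse returns the input multiplied by 2N. FFTFrame applies only 1/m_FFTSize here, so the remaining factor of 2 is presumably handled in its matching forward transform. A self-contained round-trip sketch (names are illustrative, not from FFTFrame):

#include <Accelerate/Accelerate.h>
#include <cstdlib>

// Round-trip a real signal of length n (a power of two) through
// vDSP_fft_zrip and undo the combined 2*n scaling at the end.
static void fftRoundTrip(float *x, vDSP_Length n, vDSP_Length log2n)
{
    FFTSetup setup = vDSP_create_fftsetup(log2n, FFT_RADIX2);
    DSPSplitComplex split;
    split.realp = (float *) malloc(n / 2 * sizeof(float));
    split.imagp = (float *) malloc(n / 2 * sizeof(float));

    vDSP_ctoz((DSPComplex *) x, 2, &split, 1, n / 2);     // pack evens/odds
    vDSP_fft_zrip(setup, &split, 1, log2n, FFT_FORWARD);  // result scaled by 2
    vDSP_fft_zrip(setup, &split, 1, log2n, FFT_INVERSE);  // result scaled by n
    vDSP_ztoc(&split, 1, (DSPComplex *) x, 2, n / 2);     // unpack

    float scale = 1.0f / (2.0f * n);                      // undo the 2 * n factor
    vDSP_vsmul(x, 1, &scale, x, 1, n);

    free(split.realp);
    free(split.imagp);
    vDSP_destroy_fftsetup(setup);
}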
Example #3
void FFT::performIFFT (float* fftBuffer)
{
    SplitComplex split;
    split.realp = fftBuffer;
    split.imagp = fftBuffer + properties.fftSizeHalved;

    jassert (split.realp != bufferSplit.realp); // These can't point to the same data!

    vDSP_fft_zrip (config, &split, 1, properties.fftSizeLog2, FFT_INVERSE);
    vDSP_ztoc (&split, 1, (COMPLEX*) buffer.getData(), 2, properties.fftSizeHalved);
}
Example #4
void ifft(FFT_FRAME* frame, float* outputBuffer)
{
    // get pointer to fft object
    FFT* fft = frame->fft;

    // perform in-place fft inverse
    vDSP_fft_zrip(fft->fftSetup, &frame->buffer, 1, fft->logTwo, FFT_INVERSE);
    
    // The output is now in split-complex form; use vDSP_ztoc to convert it back to an interleaved real vector.
    vDSP_ztoc(&frame->buffer, 1, (COMPLEX *)outputBuffer, 2, fft->sizeOverTwo);
    
    // This applies the windowing
    if (fft->window != NULL)
    	vDSP_vmul(outputBuffer, 1, fft->window, 1, outputBuffer, 1, fft->size);
}
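The FFT and FFT_FRAME types used above are not shown. A hypothetical layout consistent with how the fields are accessed in this example; the real project may define them differently:

#include <Accelerate/Accelerate.h>

typedef struct FFT {
    FFTSetup        fftSetup;     // from vDSP_create_fftsetup(logTwo, FFT_RADIX2)
    vDSP_Length     logTwo;       // log2 of the FFT size
    vDSP_Length     size;         // FFT size in samples
    vDSP_Length     sizeOverTwo;  // size / 2
    float          *window;       // optional synthesis window (size samples), or NULL
} FFT;

typedef struct FFT_FRAME {
    FFT            *fft;          // shared FFT configuration
    DSPSplitComplex buffer;       // split-complex spectrum, size/2 entries per part
} FFT_FRAME;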
Example #5
void fft::inversePowerSpectrum_vdsp(int start, float *finalOut, float *window, float *magnitude, float *phase) {
	uint32_t i;
	
	for (i = 0; i < half; i++) {
//		polar[2*i] = pow(10.0, magnitude[i] / 20.0) - 1.0;
		polar[2*i] = magnitude[i] - 1.0;
		polar[2*i + 1] = phase[i];
	}	
	
	//convert polar back to rectangular (interleaved) form
	vDSP_rect(polar, 2, in_real, 2, half);
	
	vDSP_ctoz((COMPLEX*) in_real, 2, &A, 1, half);
	vDSP_fft_zrip(setupReal, &A, 1, log2n, FFT_INVERSE);
	vDSP_ztoc(&A, 1, (COMPLEX*) out_real, 2, half);
	
	//undo vDSP's scaling (the factor of 2 was already removed in powerSpectrum_vdsp)
	static float scale = 1./n;
	vDSP_vsmul(out_real, 1, &scale, out_real, 1, n);

	//multiply by window
	vDSP_vmul(out_real, 1, window, 1, finalOut, 1, n);
	
}
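Examples #1 and #5 form an analysis/resynthesis pair. A hypothetical usage sketch, assuming the member functions are public and the fft instance was constructed for the block length:

#include <vector>

void processBlock(fft &f, float *input, float *output, float *window, int blockSize)
{
    std::vector<float> magnitude(blockSize / 2), phase(blockSize / 2);

    // forward: window, FFT, convert to magnitude/phase
    f.powerSpectrum_vdsp(0, input, window, magnitude.data(), phase.data());

    // ...spectral processing on magnitude/phase could go here...

    // inverse: back to rectangular, inverse FFT, rescale, re-window
    f.inversePowerSpectrum_vdsp(0, output, window, magnitude.data(), phase.data());
}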
Example #6
/*	Demonstrate the real-to-complex one-dimensional in-place FFT,
	vDSP_fft_zrip.

	The in-place FFT writes results into the same array that contains the
	input data.

	Applications may need to rearrange data before calling the
	real-to-complex FFT.  This is because the vDSP FFT routines currently
	use a separated-data complex format, in which real components and
	imaginary components are stored in different arrays.  For the
	real-to-complex FFT, real data is passed using the same arrangements
	used for complex data.  (This is largely due to the nature of the
	algorithm used in performing the real-to-complex FFT.)  The mapping puts
	even-indexed elements of the real data in real components of the
	complex data and odd-indexed elements of the real data in imaginary
	components of the complex data.

	(It is possible to improve this situation by implementing
	interleaved-data complex format.  If you would benefit from such
	routines, please enter an enhancement request at
	http://developer.apple.com/bugreporter.)

	If an application's real data is stored sequentially in an array (as is
	common) and the design cannot be altered to provide data in the
	even-odd split configuration, then the data can be moved using the
	routine vDSP_ctoz.

	The output of the real-to-complex FFT contains only the first N/2
	complex elements, with one exception.  This is because the second N/2
	elements are complex conjugates of the first N/2 elements, so they are
	redundant.

	The exception is that the imaginary parts of elements 0 and N/2 are
	zero, so only the real parts are provided.  The real part of element
	N/2 is stored in the space that would be used for the imaginary part of
	element 0.

	See the vDSP Library manual for illustration and additional
	information.
*/
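A small sketch of the packing just described, assuming a power-of-two N: after vDSP_fft_zrip, realp[0] holds the DC term and imagp[0] holds the Nyquist term, while bins 1..N/2-1 are ordinary complex values. The helper name below is illustrative, not part of the sample:

#include <Accelerate/Accelerate.h>
#include <cmath>

static void UnpackMagnitudes(const DSPSplitComplex *Observed, vDSP_Length N,
                             float *Magnitude /* N/2 + 1 entries */)
{
    Magnitude[0]     = fabsf(Observed->realp[0]);   // DC: its imaginary part is zero
    Magnitude[N / 2] = fabsf(Observed->imagp[0]);   // Nyquist, stored in imagp[0]

    for (vDSP_Length k = 1; k < N / 2; ++k)
        Magnitude[k] = hypotf(Observed->realp[k], Observed->imagp[k]);
}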
static void DemonstratevDSP_fft_zrip(FFTSetup Setup)
{
	/*	Define a stride for the array to be passed to the FFT.  In many
		applications, the stride is one and is passed to the vDSP
		routine as a constant.
	*/
	const vDSP_Stride Stride = 1;

	// Define a variable for a loop iterator.
	vDSP_Length i;

	// Define some variables used to time the routine.
	ClockData t0, t1;
	double Time;

	printf("\n\tOne-dimensional real FFT of %lu elements.\n",
		(unsigned long) N);

	// Allocate memory for the arrays.
	float *Signal = malloc(N * Stride * sizeof *Signal);
	float *ObservedMemory = malloc(N * sizeof *ObservedMemory);

	if (ObservedMemory == NULL || Signal == NULL)
	{
		fprintf(stderr, "Error, failed to allocate memory.\n");
		exit(EXIT_FAILURE);
	}

	// Assign half of ObservedMemory to reals and half to imaginaries.
	DSPSplitComplex Observed = { ObservedMemory, ObservedMemory + N/2 };

	/*	Generate an input signal.  In a real application, data would of
		course be provided from an image file, sensors, or other source.
	*/
	const float Frequency0 = 79, Frequency1 = 296, Frequency2 = 143;
	const float Phase0 = 0, Phase1 = .2f, Phase2 = .6f;
	for (i = 0; i < N; ++i)
		Signal[i*Stride] =
			  cos((i * Frequency0 / N + Phase0) * TwoPi)
			+ cos((i * Frequency1 / N + Phase1) * TwoPi)
			+ cos((i * Frequency2 / N + Phase2) * TwoPi);

	/*	Reinterpret the real signal as an interleaved-data complex
		vector and use vDSP_ctoz to move the data to a separated-data
		complex vector.  This puts the even-indexed elements of Signal
		in Observed.realp and the odd-indexed elements in
		Observed.imagp.

		Note that we pass vDSP_ctoz two times Signal's normal stride,
		because ctoz skips through a complex vector from real to real,
		skipping imaginary elements.  Considering this as a stride of
		two real-sized elements rather than one complex element is a
		legacy use.

		In the destination array, a stride of one is used regardless of
		the source stride.  Since the destination is a buffer allocated
		just for this purpose, there is no point in replicating the
		source stride.
	*/
	vDSP_ctoz((DSPComplex *) Signal, 2*Stride, &Observed, 1, N/2);

	// Perform a real-to-complex FFT.
	vDSP_fft_zrip(Setup, &Observed, 1, Log2N, FFT_FORWARD);

	/*	Prepare expected results based on analytical transformation of
		the input signal.
	*/
	float *ExpectedMemory = malloc(N * sizeof *ExpectedMemory);
	if (ExpectedMemory == NULL)
	{
		fprintf(stderr, "Error, failed to allocate memory.\n");
		exit(EXIT_FAILURE);
	}

	// Assign half of ExpectedMemory to reals and half to imaginaries.
	DSPSplitComplex Expected = { ExpectedMemory, ExpectedMemory + N/2 };

	for (i = 0; i < N/2; ++i)
		Expected.realp[i] = Expected.imagp[i] = 0;

	// Add the frequencies in the signal to the expected results.
	Expected.realp[(int) Frequency0] = N * cos(Phase0 * TwoPi);
	Expected.imagp[(int) Frequency0] = N * sin(Phase0 * TwoPi);

	Expected.realp[(int) Frequency1] = N * cos(Phase1 * TwoPi);
	Expected.imagp[(int) Frequency1] = N * sin(Phase1 * TwoPi);

	Expected.realp[(int) Frequency2] = N * cos(Phase2 * TwoPi);
	Expected.imagp[(int) Frequency2] = N * sin(Phase2 * TwoPi);

	// Compare the observed results to the expected results.
	CompareComplexVectors(Expected, Observed, N/2);

	// Release memory.
	free(ExpectedMemory);

	/*	The above shows how to use the vDSP_fft_zrip routine.  Now we
		will see how fast it is.
	*/

	/*	Zero the signal before timing because repeated FFTs on non-zero
		data can cause abnormalities such as infinities, NaNs, and
		subnormal numbers.
	*/
	for (i = 0; i < N; ++i)
		Signal[i*Stride] = 0;
	vDSP_ctoz((DSPComplex *) Signal, 2*Stride, &Observed, 1, N/2);

	// Time vDSP_fft_zrip by itself.

	t0 = Clock();

	for (i = 0; i < Iterations; ++i)
		vDSP_fft_zrip(Setup, &Observed, 1, Log2N, FFT_FORWARD);

	t1 = Clock();

	// Average the time over all the loop iterations.
	Time = ClockToSeconds(t1, t0) / Iterations;

	printf("\tvDSP_fft_zrip on %lu elements takes %g microseconds.\n",
		(unsigned long) N, Time * 1e6);

	// Time vDSP_fft_zrip with the vDSP_ctoz and vDSP_ztoc transformations.

	t0 = Clock();

	for (i = 0; i < Iterations; ++i)
	{
		vDSP_ctoz((DSPComplex *) Signal, 2*Stride, &Observed, 1, N/2);
		vDSP_fft_zrip(Setup, &Observed, 1, Log2N, FFT_FORWARD);
		vDSP_ztoc(&Observed, 1, (DSPComplex *) Signal, 2*Stride, N/2);
	}

	t1 = Clock();

	// Average the time over all the loop iterations.
	Time = ClockToSeconds(t1, t0) / Iterations;

	printf(
"\tvDSP_fft_zrip with vDSP_ctoz and vDSP_ztoc takes %g microseconds.\n",
		Time * 1e6);

	// Release resources.
	free(ObservedMemory);
	free(Signal);
}
Example #7
ITunesPixelFormat ivis_render( ITunesVis* plugin, short audio_data[][512], float freq_data[][512],
                               void* buffer, long buffer_size, bool idle )
{
  ITunesPixelFormat format = ITunesPixelFormatUnknown;

  /* make sure we have a plugin and a visual handler */
  if ( !plugin || !plugin->imports.visual_handler )
    return format;

  int i=0, w=0;
  RenderVisualData visual_data;
  DSPSplitComplex splitComplex[2];
  float *data[2];

  /* perform FFT if we're not idling */
  if ( ! idle )
  {
    /* allocate some complex vars */
    for ( i = 0 ; i < 2 ; i++ )
    {
      splitComplex[i].realp = calloc( 512, sizeof(float) );
      splitComplex[i].imagp = calloc( 512, sizeof(float) );
      data[i] = calloc( 512, sizeof(float) );
    }

    /* 2 channels for spectrum and waveform data */
    visual_data.numWaveformChannels = 2;
    visual_data.numSpectrumChannels = 2;

    /* copy spectrum audio data to visual data structure */
    for ( w = 0 ; w < 512 ; w++ )
    {
      /* iTunes visualizers expect waveform data from 0 - 255, with level 0 at 128 */
      visual_data.waveformData[0][w] = (UInt8)( (long)(audio_data[0][w]) / 128 + 128 );
      visual_data.waveformData[1][w] = (UInt8)( (long)(audio_data[1][w]) / 128 + 128 );

      /* scale to -1, +1 */
      *( data[0] + w ) = (float)(( audio_data[0][w]) / (2.0 * 8192.0) );
      *( data[1] + w ) = (float)(( audio_data[1][w]) / (2.0 * 8192.0) );
    }

    /* FFT scaler */
    float scale = ( 1.0 / 1024.0 ) ; /* scale by 1 / (2 * input length), due to how vDSP scales FFTs */
    float nyq=0, dc=0, freq=0;

    for ( i = 0 ; i < 2 ; i++ )
    {
      /* pack data into format fft_zrip expects it */
      vDSP_ctoz( (COMPLEX*)( data[i] ), 2, &( splitComplex[i] ), 1, 256 );

      /* perform FFT on normalized audio data */
      vDSP_fft_zrip( plugin->fft_setup, &( splitComplex[i] ), 1, 9, FFT_FORWARD );

      /* scale the values */
      vDSP_vsmul( splitComplex[i].realp, 1, &scale, splitComplex[i].realp, 1, 256 );
      vDSP_vsmul( splitComplex[i].imagp, 1, &scale, splitComplex[i].imagp, 1, 256 );

      /* unpack data */
      vDSP_ztoc( &splitComplex[i], 1, (COMPLEX*)( data[i] ), 2, 256 );

      /* DC and Nyquist terms are packed into elements 0 and 1; take magnitudes, ignoring phase */
      dc = *(data[i]) = fabs( *(data[i]) );
      nyq = fabs( *(data[i] + 1) );

      for ( w = 1 ; w < 256 ; w++ )
      {
        /* don't use vDSP for this since there's some overflow */
        freq = hypot( *(data[i] + w * 2), *(data[i] + w * 2 + 1) ) * 256 * 16;
        freq = MAX( 0, freq );
        freq = MIN( 255, freq );
        visual_data.spectrumData[i][ w - 1 ] = (UInt8)( freq );
      }
      visual_data.spectrumData[i][256] = nyq;
    }

    /* deallocate complex vars */
    for ( i = 0 ; i < 2 ; i++ )
    {
      free( splitComplex[i].realp );
      free( splitComplex[i].imagp );
      free( data[i] );
    }

    /* update the render message with the new visual data and timestamp */
    plugin->visual_message.u.renderMessage.renderData = &visual_data;
    plugin->visual_message.u.renderMessage.timeStampID++;
  }

  /* update time */
  plugin->visual_message.u.renderMessage.currentPositionInMS =
    ivis_current_time() - plugin->start_time; // FIXME: real time

  /* save our GL context and send the vis a render message */
  CGLContextObj currentContext = CGLGetCurrentContext();
  if ( plugin->gl_context )
    aglSetCurrentContext( (AGLContext)(plugin->gl_context ) );

  /* call the plugin's render method */
  if ( idle )
  {
    /* idle message */
    if ( plugin->wants_idle )
      plugin->imports.visual_handler( kVisualPluginIdleMessage,
                                      &( plugin->visual_message ),
                                      plugin->vis_ref );
  }
  else
  {
    /* render message */
    plugin->imports.visual_handler( kVisualPluginRenderMessage,
                                    &( plugin->visual_message ),
                                    plugin->vis_ref );

    /* set position message */
    plugin->visual_message.u.setPositionMessage.positionTimeInMS
      = plugin->visual_message.u.renderMessage.currentPositionInMS;
    plugin->imports.visual_handler( kVisualPluginSetPositionMessage, &( plugin->visual_message ),
                                    plugin->vis_ref );
  }
  /* update message */
  plugin->imports.visual_handler( kVisualPluginUpdateMessage, NULL,
                                  plugin->vis_ref );

  /* read pixels and restore our GL context */
  CGLLockContext( CGLGetCurrentContext() );

  switch ( get_pixels( buffer, buffer_size, CGLGetCurrentContext() != currentContext ) )
  {
  case 3:
    format = ITunesPixelFormatRGB24;
    break;

  case 4:
    format = ITunesPixelFormatRGBA32;
    break;

  default:
    break;
  }

  CGLUnlockContext ( CGLGetCurrentContext() );

  /* restore our GL context */
  CGLSetCurrentContext( currentContext );
  return format;
}