Code example #1
/* Recursive Cooley-Tukey FFT step (decimation in time). Assumes
   <complex.h>, `typedef double complex cplx;`, a PI constant, and that
   n is a power of two. On entry out[] must be a copy of buf[]; on
   return buf[] holds the transform. */
void _fft(cplx buf[], cplx out[], int n, int step)
{
	if (step < n) {
		_fft(out, buf, n, step * 2);               /* even-indexed half */
		_fft(out + step, buf + step, n, step * 2); /* odd-indexed half  */

		for (int i = 0; i < n; i += 2 * step) {
			cplx t = cexp(-I * PI * i / n) * out[i + step]; /* twiddle factor */
			buf[i / 2]     = out[i] + t;   /* first half of the butterfly  */
			buf[(i + n)/2] = out[i] - t;   /* second half of the butterfly */
		}
	}
}
Code example #2
/* Same recursion as example #1, written directly against double complex. */
void _fft(double complex *buf, double complex *out, int n, int step)
{
    if (step < n) {
        _fft(out, buf, n, step * 2);
        _fft(out + step, buf + step, n, step * 2);

        for (int i = 0; i < n; i += 2 * step)
        {
            double complex t = cexp(-I * PI * i / n) * out[i + step];
            buf[i / 2]       = out[i] + t;
            buf[(i + n) / 2] = out[i] - t;
        }
    }
}
Code example #3
File: cali_sar_kalman.c  Project: chen116/ccgrid
void fft(cplx buf[], int n)
{
	int i;
	cplx out[n];   /* scratch copy (VLA) */
	for (i = 0; i < n; i++) out[i] = buf[i];

	_fft(buf, out, n, 1);
}
Code example #4
/* In-place FFT over a window of WS samples (WS assumed a power of two). */
void fft(double complex *data_in)
{
    double complex *out = calloc(WS, sizeof(double complex));
    if (!out)
        return;                    /* guard against allocation failure */
    for (int i = 0; i < WS; i++)
        out[i] = data_in[i];       /* scratch copy of the input window */
    _fft(data_in, out, WS, 1);     /* result is left in data_in */
    free(out);                     /* scratch buffer no longer needed */
}
Code example #5
File: fft.c  Project: egawata/dft
complex fft(int n, int log2n, double *data)
{
    if (n < 0 || n >= (1 << log2n)) {
        fprintf(stderr, "fft() : index no. [%d] is invalid.\n", n);
        exit(1);
    }

    n = bit_reverse(n, log2n);

    return _fft(n, 0, log2n, data);
}
Code example #6
File: fft.c  Project: egawata/dft
/*
 *  Performs a fast Fourier transform.
 *
 *  This function actually performs only a single stage of the FFT;
 *  the remaining stages are executed through the recursive calls.
 *  TODO: avoiding the recursion would probably be more efficient,
 *  so there is room for improvement here.
 *
 *  n     : frequency component to compute (0 <= n < 2^log2n)
 *  depth : stage number. Pass 0 unless this is a recursive call made
 *          from inside fft(); that is, whenever the function is called
 *          from outside fft(), specify 0.
 *  log2n : base-2 logarithm of the number of input samples.
 *          With 64 input samples, log2n = 6 (2^6 = 64).
 *  data  : pointer to the array holding the input samples.
 *          The array must contain 2^log2n samples.
 *
 *  Return value
 *    The result of the computation, as a complex value.
 *
 */
complex _fft(int n, int depth, int log2n, double *data)
{
    if (depth == log2n) {
        return *(data + n);
    }
    else {
        int m = 1 << depth;     //  2 to the power of depth
        int m2 = m << 1;

        if (n & m) {
            complex W = cexp(- PI2 * I * (n & (m-1)) / m2);
            return W * (
                      _fft(n - m, depth+1, log2n, data)
                    - _fft(n    , depth+1, log2n, data)
            ) ;
        } else {
            return _fft(n    , depth+1, log2n, data)
                 + _fft(n + m, depth+1, log2n, data);
        }
    }
}
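This variant evaluates one frequency bin per call, so producing a full spectrum means looping over all bins. Below is a minimal driver sketch, assuming the fft() wrapper from example #5 and eight input samples; the driver itself is not part of the egawata/dft project.

#include <stdio.h>
#include <complex.h>

#define LOG2N 3                       /* 2^3 = 8 input samples */

double complex fft(int n, int log2n, double *data);  /* from fft.c above */

int main(void)
{
    double data[1 << LOG2N] = { 1, 1, 1, 1, 0, 0, 0, 0 };

    /* Each call costs O(N), so computing all 2^log2n bins costs O(N^2),
       well behind a conventional O(N log N) FFT when every bin is
       needed (cf. the TODO in the comment above). */
    for (int n = 0; n < (1 << LOG2N); n++) {
        double complex X = fft(n, LOG2N, data);
        printf("X[%d] = %+.4f %+.4fi\n", n, creal(X), cimag(X));
    }
    return 0;
}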
Code example #7
File: jamdetect.c  Project: esayers/capstone-schwarz
void fft(cplx buf[], int n){
    int i;

    cplx *out = prot_mem_malloc(sizeof(cplx) * n);

    for (i = 0; i < n; i++) out[i] = buf[i];

    _fft(buf, out, n, 1);

    /* Flip the data across the y axis (swap the two halves of the
       spectrum) and normalize by n. */
    for (i = 0; i < n/2; i++) out[i + n/2] = buf[i];
    for (i = 0; i < n/2; i++) out[i] = buf[i + n/2];
    for (i = 0; i < n; i++) buf[i] = out[i] / n;

    prot_mem_free(out);
}
Code example #8
File: fft.c  Project: ldelchambre/wcorrQRL
double *ifft(double *F, const double *X, const unsigned long nvar, const unsigned long nobs) {
  return _fft(F,X,nvar,nobs,1);
}
Code example #9
/**
* @details
* Method to run the channeliser.
*
* The channeliser performs channelisation of a number of sub-bands containing
* a complex time series.
*
* Parallelisation, by means of OpenMP threads, is carried out by splitting
* the sub-bands as evenly as possible between threads.
*
* @param[in]  timeSeries  Buffer of time samples to be channelised.
* @param[out] spectra     Set of spectra produced.
*/
void PPFChanneliser::run(const TimeSeriesDataSetC32* timeSeries,
        SpectrumDataSetC32* spectra)
{
    // Perform a number of sanity checks on the input data.
    _checkData(timeSeries);

    // Make local copies of the data dimensions.
    unsigned nSubbands      = timeSeries->nSubbands();
    unsigned nPolarisations = timeSeries->nPolarisations();
    unsigned nTimeBlocks    = timeSeries->nTimeBlocks();
    unsigned nTimesPerBlock = timeSeries->nTimesPerBlock();

    // Resize the output spectra blob (if required).
    spectra->resize(nTimeBlocks, nSubbands, nPolarisations, _nChannels);

    // Set the timing parameters - Only need the timestamp of the first packet
    // for this version of the Channeliser.
    spectra->setLofarTimestamp(timeSeries->getLofarTimestamp());
    spectra->setBlockRate(timeSeries->getBlockRate() * _nChannels);

    const float* coeffs = &_coeffs[0];
    unsigned threadId = 0, nThreads = 0, start = 0, end = 0;
    Complex *workBuffer = 0, *filteredSamples = 0;
    Complex const * timeData = 0;
    const Complex* timeStart = timeSeries->constData();
    Complex* spectraStart = spectra->data();

    if (_nChannels == 1)
    {
         // Loop over data to be channelised.
         for (unsigned subband = 0; subband < nSubbands; ++subband)
         {
             for (unsigned pol = 0; pol < nPolarisations; ++pol)
             {
                 for (unsigned block = 0; block < nTimeBlocks; ++block)
                 {
                     // Get pointer to time series array.
                     unsigned index = timeSeries->index(subband, nTimesPerBlock,
                                  pol, nPolarisations, block, nTimeBlocks);
                     timeData = &timeStart[index];
                     for (unsigned t = 0; t < nTimesPerBlock; ++t) {
                         // FFT the filtered sub-band data to form a new spectrum.
                         unsigned indexSpectra = spectra->index(subband, nSubbands,
                                 pol, nPolarisations, (nTimesPerBlock*block)+t, _nChannels);
//                         spectraStart = &spectra->data()[indexSpectra];
                         spectraStart[indexSpectra] = timeData[t];
                     }
                 }
             }
         }

    } else {
        // Set up work buffers (if required).
        unsigned nFilterTaps = _ppfCoeffs.nTaps();
        if (!_buffersInitialised)
            _setupWorkBuffers(nSubbands, nPolarisations, _nChannels, nFilterTaps);

        // Channeliser processing.
        #pragma omp parallel \
            shared(nTimeBlocks, nPolarisations, nSubbands, nFilterTaps, coeffs,\
                    timeStart, spectraStart) \
            private(threadId, nThreads, start, end, workBuffer, filteredSamples, \
                    timeData)
        {
            threadId = omp_get_thread_num();
            nThreads = omp_get_num_threads();

            // Assign processing threads in a round robin fashion to subbands.
            _assign_threads(start, end, nSubbands, nThreads, threadId);

            // Pointer to work buffer for the thread.
            filteredSamples = &_filteredData[threadId][0];

            // Loop over data to be channelised.
            for (unsigned subband = start; subband < end; ++subband)
            {
                for (unsigned pol = 0; pol < nPolarisations; ++pol)
                {
                    for (unsigned block = 0; block < nTimeBlocks; ++block)
                    {
                        // Get pointer to time series array.
                        unsigned index = timeSeries->index(subband, nTimesPerBlock,
                                     pol, nPolarisations, block, nTimeBlocks);
                        timeData = &timeStart[index];

                        // Get a pointer to the work buffer.
                        workBuffer = &(_workBuffer[subband * nPolarisations + pol])[0];

                        // Update buffered (lagged) data for the sub-band.
                        _updateBuffer(timeData, _nChannels, nFilterTaps, workBuffer);

                        // Apply the PPF.
                        _filter(workBuffer, nFilterTaps, _nChannels, coeffs, filteredSamples);

                        // FFT the filtered sub-band data to form a new spectrum.
                        unsigned indexSpectra = spectra->index(subband, nSubbands,
                                pol, nPolarisations, block, _nChannels);
                        _fft(filteredSamples, &spectraStart[indexSpectra]);
                    }
                }
            }

        } // end of parallel region.

    }

}
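The `_assign_threads()` helper used above is not included in this snippet. As a sketch of what an "as evenly as possible" split of sub-bands over threads can look like, here is a plain-C version; its signature and behaviour are assumptions, not the project's actual method.

/* Distribute n sub-bands over nThreads threads as evenly as possible:
 * every thread gets floor(n / nThreads) sub-bands, and the first
 * n % nThreads threads each take one extra. The calling thread then
 * processes sub-bands [*start, *end). */
static void assign_threads(unsigned *start, unsigned *end, unsigned n,
                           unsigned nThreads, unsigned threadId)
{
    unsigned base = n / nThreads;   /* minimum share per thread */
    unsigned rem  = n % nThreads;   /* leftover sub-bands        */
    *start = threadId * base + (threadId < rem ? threadId : rem);
    *end   = *start + base + (threadId < rem ? 1u : 0u);
}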
Code example #10
File: convolver.cpp  Project: flair2005/avrs
/** Initialize the convolver.
 * It also sets the filter to be a Dirac. Thus, if no filter is specified,
 * the audio data pass through unchanged, albeit at the full computational
 * cost of the convolution.
 *
 * You should use create() instead of directly using this constructor.
 * @throw std::bad_alloc if not enough memory could be allocated
 * @throw std::runtime_error if sizeof(float) != 4
 **/
Convolver::Convolver(const nframes_t nframes, const crossfade_t crossfade_type)
		throw (std::bad_alloc, std::runtime_error) :
		_frame_size(nframes), _partition_size(nframes + nframes),
		_crossfade_type(crossfade_type), _no_of_partitions_to_process(0),
		_old_weighting_factor(0)
{
	// make sure that SIMD instructions can be used properly
	if (sizeof(float) != 4)
	{
		throw(std::runtime_error("sizeof(float) on your computer is not 4! "
				"The convolution cannot take place properly."));
	}

	_signal.clear();
	_waiting_queue.clear();

	// allocate memory and initialize to 0
	_fft_buffer.resize(_partition_size, 0.0f);
	_ifft_buffer.resize(_partition_size, 0.0f);

	// create first partition
	//_filter_coefficients.resize(_partition_size, 0.0f);

	_zeros.resize(_partition_size, 0.0f);

	_output_buffer.resize(_frame_size, 0.0f);

	// create fades if required
	if (_crossfade_type != none)
	{
		// init memory
		_fade_in.resize(_frame_size, 0.0f);
		_fade_out.resize(_frame_size, 0.0f);

		// this is the ifft normalization factor (fftw3 does not normalize)
		const float norm = 1.0f / _partition_size;

		// raised cosine fade
		if (_crossfade_type == raised_cosine)
		{
			// create fades
			for (unsigned int n = 0u; n < _frame_size; n++)
			{
				_fade_in[n] = norm * (0.5f + 0.5f * cos(
						static_cast<float>(_frame_size - n) / _frame_size * pi_float));
				_fade_out[n] = norm * (0.5f + 0.5f * cos(
						static_cast<float>(n) / _frame_size * pi_float));
			} // for
		} // if

		// linear fade
		else if (_crossfade_type == linear)
		{
			// create fades
			for (unsigned int n = 0u; n < _frame_size; n++)
			{
				_fade_in[n] = norm * (static_cast<float>(n) / _frame_size);
				_fade_out[n] = norm * (static_cast<float>(_frame_size - n) / _frame_size);
			} // for
		} // else if

	} // if

	// create fft plans for halfcomplex data format
	_fft_plan = fftwf_plan_r2r_1d(_partition_size, &_fft_buffer[0],
			&_fft_buffer[0], FFTW_R2HC, FFTW_PATIENT);
	_ifft_plan = fftwf_plan_r2r_1d(_partition_size, &_ifft_buffer[0],
			&_ifft_buffer[0], FFTW_HC2R, FFTW_PATIENT);

	// calculate transfer function of a dirac
	std::copy(_zeros.begin(), _zeros.end(), _fft_buffer.begin());

	_fft_buffer[0] = 1.0f;
	_fft();

	// store dirac
	_neutral_filter = _fft_buffer;

	// clear _fft_buffer
	std::copy(_zeros.begin(), _zeros.end(), _fft_buffer.begin());

	// set dirac as default filter
	set_neutral_filter();
}
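The plans above use FFTW's halfcomplex (r2hc) packing, which dictates how the convolver has to multiply spectra later on. The following is a sketch of a pointwise product in that layout for an even transform size; it illustrates the format only and is not the project's `_multiply_spectra()`, whose body is not shown.

/* Pointwise product of two FFTW halfcomplex (r2hc) spectra of even
 * length n. Layout: buf[0] = DC, buf[k] = Re(X_k) and buf[n-k] = Im(X_k)
 * for 0 < k < n/2, and buf[n/2] = Nyquist bin (purely real). */
static void multiply_halfcomplex(const float *a, const float *b,
                                 float *out, unsigned n)
{
    out[0]     = a[0] * b[0];             /* DC bin      */
    out[n / 2] = a[n / 2] * b[n / 2];     /* Nyquist bin */
    for (unsigned k = 1; k < n / 2; k++) {
        float re = a[k] * b[k] - a[n - k] * b[n - k];
        float im = a[k] * b[n - k] + a[n - k] * b[k];
        out[k]     = re;                  /* real part of the product      */
        out[n - k] = im;                  /* imaginary part of the product */
    }
}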
Code example #11
File: convolver.cpp  Project: flair2005/avrs
/** Fast convolution of an audio signal frame.
 * TODO: This function might provide some (slight) potential to optimize the
 * performance regarding the buffer management.
 * @param input_signal pointer to the first audio sample in the frame to be
 * convolved.
 * @param weighting_factor amplitude weighting factor for the current signal
 * frame. The filter has to be set via \b Convolver::set_filter_t or
 * \b Convolver::set_filter_f. The filter is stored, so if you want
 * to keep the previous filter you don't need to update it.
 * @return pointer to the first sample of the convolved and weighted signal
 */
float*
Convolver::convolve_signal(float *input_signal, float weighting_factor)
{
	/////////////////////////////////////////////////
	////// check if processing has to be done ///////
	/////////////////////////////////////////////////
	if (!weighting_factor || !_contains_data(input_signal, _frame_size))
	{
		if (!_no_of_partitions_to_process)
		{
			// no processing has to be done

			// make sure that no previous signal frames are reused
			_signal.clear();

			// make sure that output buffer is empty
			std::copy(_zeros.begin(), _zeros.begin() + _frame_size,
					_output_buffer.begin());

			// set current filter in order to assure smooth re-fade-in
			_update_filter_partitions();

			return &_output_buffer[0];
		}
		else
		// if there are still partitions to be convolved
		{
			_no_of_partitions_to_process--;
		}
	}
	// if there is data in input signal
	else
		_no_of_partitions_to_process = _waiting_queue.size();

	//////////////////////////////////////////////
	/////// processing has to be done ////////////
	//////////////////////////////////////////////

	// add current signal frame to _fft_buffer
	// _fft_buffer holds two signal frames
	std::copy(input_signal, input_signal + _frame_size,
			_fft_buffer.begin() + _frame_size);

	// signal fft
	_fft();

	// save signal partition in frequency domain
	_signal.push_back(_fft_buffer);

	// add signal to fft buffer (for the upcoming cycle)
	std::copy(input_signal, input_signal + _frame_size, _fft_buffer.begin());

	// if we crossfade, then convolve current audio frame
	// with previous filter
	if (_crossfade_type != none)
	{
		// erase most ancient audio signal frame
		if (_signal.size() > _filter_coefficients.size() / _partition_size)
			_signal.erase(_signal.begin());

		// multiplication of spectra
		_multiply_spectra();

		// signal ifft
		_ifft();

		// store data in output buffer
		std::copy(_ifft_buffer.begin() + _frame_size, _ifft_buffer.end(),
				_output_buffer.begin());
	}

	// set current filter
	_update_filter_partitions();

	// this loops when a long filter has been replaced by a short one
	while (_signal.size() > _filter_coefficients.size() / _partition_size)
	{
		_signal.erase(_signal.begin());
	}

	// multiplication of spectra
	_multiply_spectra();

	// signal ifft
	_ifft();

	// create proper output signal depending on crossfade
	if (_crossfade_type == none)
	{
		// normalize buffer (fftw3 does not do this)
		_normalize_buffer(&_ifft_buffer[_frame_size], weighting_factor);

		// return second half of _ifft_buffer
		return &_ifft_buffer[_frame_size];
	}
	else
	{
		// here, FFT normalization is included in the crossfades
		_crossfade_into_buffer(_output_buffer, weighting_factor);

		return &_output_buffer[0];
	}
}
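When crossfading is enabled, the method blends the frame convolved with the previous filter into the frame convolved with the current one, using the fade tables built in the constructor (which already carry the 1/N FFT normalization). Below is a minimal sketch of that blend; it is an illustration only, not the project's `_crossfade_into_buffer()`, whose body is not shown.

/* Sketch of a crossfade step: blend the frame convolved with the
 * previous filter into the frame convolved with the current filter,
 * applying the amplitude weighting factor on top. fade_in[] rises from
 * 0 to 1 and fade_out[] falls from 1 to 0 across the frame. */
static void crossfade_frames(float *out, const float *old_frame,
                             const float *new_frame,
                             const float *fade_out, const float *fade_in,
                             unsigned frame_size, float weight)
{
    for (unsigned n = 0; n < frame_size; n++)
        out[n] = weight * (fade_out[n] * old_frame[n]
                           + fade_in[n] * new_frame[n]);
}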
Code example #12
/*
** MFCC feature extraction.
** Input:  sph   - input speech signal
**         param - input feature extraction parameters
** Output: feat  - extracted features
**
** Tips: Before calling this function, you should have the *sph* and *param*
**     inputs initialized and memory allocated. *param* can be initialized by
**     the *set_param* function; if it hasn't been initialized, we will call
**     the *_init_param* function to initialize it with a group of default
**     parameters.
*/
int mfcc(PTRSPH sph, PTRFEAT feat, PTRPARAM param) {
	int i,j;
	Frames frame;
	double *frm = NULL, *e = NULL, *c = NULL;

	/* some initialization */
	// initialize feature extraction parameters
	if(_isParamInit==false){
		_init_param(param);
	}
	// FFT initialize
	if(_isFFTInit==false ){
		if(_fft_init(param->fftNpts, &(param->_fftm))){ // first initialize
			fprintf(stderr,"initialize FFT kernel failed. In line %d, in file: %s\n",__LINE__,__FILE__);
			_isFFTInit = false;
			return -1;
		}
	}
	// filter_bank initialize
	e = (double*)malloc(param->nFilters*sizeof(double));
	if(!e){ 
		fprintf(stderr,"memory realloc failed in line %d, in file: %s\n",__LINE__,__FILE__); 
		free(_fftbuf); free(_w1c); free(_w3c);  free(_jx0);
		return -1; 
	}
	if(_isFBinit == false){
		if( _set_mel_idx(param->nFilters,param,sph->fs)){ 
			fprintf(stderr,"_set_mel_idx failed in line %d, in file: %s\n",  __LINE__,__FILE__); 
			free(_fftbuf); free(_w1c); free(_w3c);  free(_jx0); free(e);
			return -1;
		}
	}
	// DCT initialize
	if(_isDCTInit==false ){
		if(_dct_init(param->nFilters, param->nCeps)){  // first initialize
			fprintf(stderr,"initialize DCT kernel failed. In line %d, in file: %s\n",__LINE__,__FILE__);
			free(_fftbuf); free(_w1c); free(_w3c); free(_jx0); free(e);
			_isDCTInit = false;
			return -1;
		}
	}
	c = (double*)malloc((param->nCeps+1)*sizeof(double));
	if(!c){ 
		fprintf(stderr,"memory realloc failed in line %d, in file: %s\n",__LINE__,__FILE__);
		free(_fftbuf); free(_w1c); free(_w3c); free(_jx0); free(e); free(_dctk);
		return -1; 
	}
	// lifter initialize
	if(_isLifter==false) {
		if(_lifter_set(param->nLifter, param->nCeps)){
			fprintf(stderr,"liftering initialize failed!\n");
			free(_fftbuf); free(_w1c); free(_w3c); free(_jx0); free(e); free(_dctk); free(c);
			return -1;
		}
	}
	
	/* front-end process */
	// pre-emphasis
	_premphasis(sph, param->emphco);
	// divide the signal into frames
	if(_vec2frame(sph, &frame, param->frameLen, param->frameShift)){ return -1; }
	// weighting window
	if(_weightwin(&frame,param->win)){ return -1; }

	// alloc memory for feature results
	feat->vl = param->nCeps;
	feat->vs = frame.nf;
	feat->data = (double*)realloc(feat->data, sizeof(double)*feat->vl*feat->vs);
	if(!feat->data){
		fprintf(stderr,"memory realloc failed for feat->data in line %d, in file %s.\n",__LINE__,__FILE__);
		free(_fftbuf); free(_w1c); free(_w3c); free(_jx0); free(e); free(_dctk); free(c);
		return -1;
	}

	/* key transformations for each frame */
	frm = frame.data;
	for(i=0;i<frame.nf;i++){ 
		// copy frame to buffer
		memcpy(_fftbuf,frm+i*frame.fs,sizeof(double)*frame.fs);
		for(j=frame.fs;j<param->fftNpts;j++)
			*(_fftbuf+j) = (double)0.0;
		
		if(_fft(param)){  // apply FFT
			fprintf(stderr, "apply FFT failed!\n");
			free(_fftbuf); free(_w1c); free(_w3c); free(_jx0); free(e); free(_dctk); free(c);
			return -1;
		}
		
		if(_filterbank(_fftbuf,param->fftNpts, param->nFilters, 0, 1, e)) {  // apply the filter bank
			fprintf(stderr,"apply filter bank failed\n");
			free(_fftbuf); free(_w1c); free(_w3c); free(_jx0); free(e); free(_dctk); free(c);
			return -1;
		}
		
		if(_dct(e,c)){  // apply DCT
			fprintf(stderr,"apply DCT failed\n");
			free(_fftbuf); free(_w1c); free(_w3c); free(_jx0); free(e); free(_dctk); free(c);
			return -1;
		}

		for(j=0;j<param->nCeps;j++)
			*(c+j) *= *(_rlifter+j);

		/* save the extracted feature to feat->data */
		memcpy(feat->data+feat->vl*i, c, sizeof(double)*feat->vl);
	}

	/* clear up */
	free(e); free(c);free(frame.data);
	return 0;
}
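A hedged usage sketch of the calling contract described in the header comment. The SPH/FEAT/PARAM struct layouts and `set_param()` are not shown in this snippet, so every struct field and type name below is an assumption for illustration only.

#include <stdlib.h>

/* Hypothetical driver; struct and field names are assumed. */
int extract_mfcc(double *samples, int nSamples, int fs)
{
    SPH   sph   = {0};   /* assumed: PTRSPH is SPH*                        */
    FEAT  feat  = {0};   /* feat.data == NULL, so mfcc()'s realloc mallocs */
    PARAM param = {0};

    sph.data = samples;  /* assumed field names */
    sph.len  = nSamples;
    sph.fs   = fs;       /* read by mfcc() via sph->fs */

    set_param(&param);   /* optional: mfcc() falls back to _init_param() */

    if (mfcc(&sph, &feat, &param) != 0)
        return -1;

    /* feat.data now holds feat.vs frames of feat.vl coefficients each. */
    free(feat.data);
    return 0;
}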