// Compute Chebyshev coefficients for the specified vector
void FeatureChebyshev::ChebyshevCoefficients(const vector<float> &input,
                                             int coeff_cnt, float *coeff) {
  // Re-sample the input at the Chebyshev points of its index range
  int input_range = (input.size() - 1);
  vector<float> resamp(coeff_cnt);
  for (int samp_idx = 0; samp_idx < coeff_cnt; samp_idx++) {
    // compute sampling position
    float samp_pos = input_range *
                     (1 + cos(M_PI * (samp_idx + 0.5) / coeff_cnt)) / 2;
    // interpolate
    int samp_start = static_cast<int>(samp_pos);
    int samp_end = static_cast<int>(samp_pos + 0.5);
    float func_delta = input[samp_end] - input[samp_start];
    resamp[samp_idx] = input[samp_start] +
                       ((samp_pos - samp_start) * func_delta);
  }
  // Compute the coefficients via a DCT-II of the re-sampled values
  float normalizer = 2.0 / coeff_cnt;
  for (int coeff_idx = 0; coeff_idx < coeff_cnt; coeff_idx++, coeff++) {
    double sum = 0.0;
    for (int samp_idx = 0; samp_idx < coeff_cnt; samp_idx++) {
      sum += resamp[samp_idx] * cos(M_PI * coeff_idx * (samp_idx + 0.5) /
                                    coeff_cnt);
    }
    (*coeff) = (normalizer * sum);
  }
}
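
To see what the routine produces, the sketch below repeats the same two steps (interpolated resampling at the Chebyshev points, then a DCT-II with a 2/N scale) as a standalone free function and prints the leading coefficients of a sampled sine. The free-function form, the test signal, and the coefficient count are illustrative assumptions, not part of the original class.

#include <cmath>
#include <cstdio>
#include <vector>

// Free-function mirror of FeatureChebyshev::ChebyshevCoefficients (illustrative only).
static void ChebyshevCoeffs(const std::vector<float> &input, int coeff_cnt, float *coeff) {
  int input_range = input.size() - 1;
  std::vector<float> resampled(coeff_cnt);
  for (int k = 0; k < coeff_cnt; k++) {
    // Chebyshev point mapped onto [0, input_range], then linear interpolation.
    float pos = input_range * (1 + cos(M_PI * (k + 0.5) / coeff_cnt)) / 2;
    int lo = static_cast<int>(pos);
    int hi = static_cast<int>(pos + 0.5);
    resampled[k] = input[lo] + (pos - lo) * (input[hi] - input[lo]);
  }
  float normalizer = 2.0f / coeff_cnt;
  for (int j = 0; j < coeff_cnt; j++) {
    double sum = 0.0;
    for (int k = 0; k < coeff_cnt; k++)
      sum += resampled[k] * cos(M_PI * j * (k + 0.5) / coeff_cnt);
    coeff[j] = normalizer * sum;
  }
}

int main() {
  // One period of a sine, sampled at 256 points (illustrative input).
  std::vector<float> signal(256);
  for (size_t i = 0; i < signal.size(); i++)
    signal[i] = sin(2 * M_PI * i / (signal.size() - 1));

  float coeff[8];
  ChebyshevCoeffs(signal, 8, coeff);
  for (int j = 0; j < 8; j++)
    printf("c[%d] = % .5f\n", j, coeff[j]);
  return 0;
}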
Example #2
File: resamp.c  Project: dilane007/D_DSP
// Resample a complex signal: run the real-valued resamp() kernel over the real
// and imaginary parts separately, keeping both parts on the same filter phase.
void resamp_complex(int interp_factor_L, int decim_factor_M,
                    int num_taps_per_phase, int *p_current_phase,
                    const double *const p_H, double *const p_Z_real,
                    double *const p_Z_imag, int num_inp,
                    const double *p_inp_real, const double *p_inp_imag,
                    double *p_out_real, double *p_out_imag, int *p_num_out)
{
    // Process the real part with a scratch copy of the phase so that the
    // imaginary part below starts from the same phase state.
    int current_phase = *p_current_phase;
    resamp(interp_factor_L, decim_factor_M, num_taps_per_phase,
           &current_phase, p_H, p_Z_real, num_inp, p_inp_real, p_out_real,
           p_num_out);

    // Process the imaginary part; this call advances the caller's phase.
    resamp(interp_factor_L, decim_factor_M, num_taps_per_phase,
           p_current_phase, p_H, p_Z_imag, num_inp, p_inp_imag, p_out_imag,
           p_num_out);
}
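
Nothing in the snippet above fixes the buffer sizes, so the usage sketch below (C++, built and linked against resamp.c) shows one plausible calling convention: a filter of interp_factor_L * num_taps_per_phase taps, one num_taps_per_phase-deep delay line per component, an output buffer of roughly num_inp * L / M samples, and a phase that starts at zero. These sizes and the example factors are assumptions about the D_DSP resamp() contract, not something stated in the snippet.

#include <vector>

extern "C" void resamp_complex(int interp_factor_L, int decim_factor_M,
                               int num_taps_per_phase, int *p_current_phase,
                               const double *const p_H, double *const p_Z_real,
                               double *const p_Z_imag, int num_inp,
                               const double *p_inp_real, const double *p_inp_imag,
                               double *p_out_real, double *p_out_imag, int *p_num_out);

int main() {
  // Assumed example factors: 3/2 rate increase on 256 input samples.
  const int L = 3, M = 2, taps_per_phase = 8;
  const int num_inp = 256;

  // Assumed buffer contract: L * taps_per_phase filter coefficients and a
  // taps_per_phase-deep delay line per component (filter design omitted).
  std::vector<double> h(L * taps_per_phase, 0.0);
  std::vector<double> z_re(taps_per_phase, 0.0), z_im(taps_per_phase, 0.0);

  std::vector<double> in_re(num_inp, 0.0), in_im(num_inp, 0.0);
  // Output sized for roughly num_inp * L / M samples, plus slack.
  std::vector<double> out_re(num_inp * L / M + L), out_im(num_inp * L / M + L);

  int phase = 0, num_out = 0;
  resamp_complex(L, M, taps_per_phase, &phase, h.data(), z_re.data(), z_im.data(),
                 num_inp, in_re.data(), in_im.data(), out_re.data(), out_im.data(),
                 &num_out);
  return 0;
}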
Example #3
// Takes audio input from the microphone and passes it to RAT media for transmission.
// Matches the CoreAudio AudioDeviceIOProc callback signature.
static OSStatus
audioIOProc(AudioDeviceID inDevice, const AudioTimeStamp* inNow,
            const AudioBufferList* inInputData, const AudioTimeStamp* inInputTime,
            AudioBufferList* outOutputData, const AudioTimeStamp* inOutputTime,
            void* inClientData)
{
    UNUSED(inDevice);
    UNUSED(inNow);
    UNUSED(inInputTime);
    UNUSED(outOutputData);
    UNUSED(inOutputTime);
    UNUSED(inClientData);

    // If input is provided, copy it into the read buffer.
    if (inInputData != NULL && inInputData->mNumberBuffers > 0) {
        // Get the input data.
        Float32* ip = (Float32*)inInputData->mBuffers[0].mData;
        int providedFrames = inInputData->mBuffers[0].mDataByteSize /
                             devices[add].inputStreamBasicDescription_.mBytesPerFrame;

        // Convert the audio to mono by taking the first channel of each frame.
        double* monoInput = malloc(sizeof(double) * providedFrames);
        int sample;
        for (sample = 0; sample < providedFrames; sample++) {
            monoInput[sample] = *ip;
            ip += inInputData->mBuffers[0].mNumberChannels;
        }

        // Resample the raw data (80/441 converts 44.1 kHz input down to 8 kHz).
        double* monoOutput = malloc(sizeof(double) * providedFrames);
        int numOutput = 0;
        int factorL = 80, factorM = 441;
        resamp(factorL, factorM, DECIM441_LENGTH / factorL, &currentPhase_,
               decim441, zLine_, providedFrames, monoInput, monoOutput, &numOutput);

        // Scale, clip, and convert each output sample to signed 16-bit linear,
        // then store it in the circular read buffer.
        for (sample = 0; sample < numOutput; sample++) {
            // Apply the audio gain.
            monoOutput[sample] *= devices[add].inputGain_;
            // Clip the audio data.
            if (monoOutput[sample] > 1.0) monoOutput[sample] = 1.0;
            else if (monoOutput[sample] < -1.0) monoOutput[sample] = -1.0;
            // Convert to signed 16-bit and append to the read buffer.
            SInt16 si = (SInt16)(32767 * monoOutput[sample]);
            memcpy(readBuffer_ + inputWriteIndex_, &si, 2);
            inputWriteIndex_ += 2;
            if (inputWriteIndex_ == readBufferSize_) inputWriteIndex_ = 0;
            availableInput_ += 2;
        }

        // Clean up.
        free(monoInput);
        free(monoOutput);
    }

    // Return success.
    return noErr;
}