/******************************************************************************
 * Syntax:
 *   int QRSFilter(int datum, int init) ;
 * Description:
 *   QRSFilter() takes samples of an ECG signal as input and returns a sample of
 *   a signal that is an estimate of the local energy in the QRS bandwidth. In
 *   other words, the signal has a lump in it whenever a QRS complex, or QRS
 *   complex like artifact occurs. The filters were originally designed for data
 *   sampled at 200 samples per second, but they work nearly as well at sample
 *   frequencies from 150 to 250 samples per second.
 *
 *   The filter buffers and static variables are reset if a value other than
 *   0 is passed to QRSFilter through init.
 ******************************************************************************/
int QRSFilter(int datum, int init)
{
    if (init)
    {
        hpfilt(0, 1);               // Initialize filters.
        lpfilt(0, 1);
        mvwint(0, 1);
        deriv1(0, 1);
        deriv2(0, 1);
    }
    datum = lpfilt(datum, 0);       // Low pass filter data.
    datum = hpfilt(datum, 0);       // High pass filter data.
    datum = deriv2(datum, 0);       // Take the derivative.
    datum = abs(datum);             // Take the absolute value.
    datum = mvwint(datum, 0);       // Average over an 80 ms window.
    return datum;
}
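The final stage above averages the rectified derivative over an 80 ms window. As a rough illustration of that stage only, the sketch below implements a moving-window integrator for data sampled at 200 samples per second (so the window is 16 samples). It is a stand-in written for this note, not the original mvwint() routine, whose buffer length, scaling, and saturation handling may differ.

#include <string.h>

#define SAMPLE_RATE     200                             /* Assumed rate, per the header comment above. */
#define WINDOW_MS       80
#define WINDOW_WIDTH    ((WINDOW_MS*SAMPLE_RATE)/1000)  /* 16 samples at 200 samples per second. */

/* Illustrative moving-window average; not the original mvwint(). */
static int mvwint_sketch(int datum, int init)
{
    static int buf[WINDOW_WIDTH];
    static int ptr = 0;
    static long sum = 0;

    if (init)
    {
        memset(buf, 0, sizeof(buf));    /* Reset the window, mirroring the init path of QRSFilter(). */
        ptr = 0;
        sum = 0;
    }
    sum += datum - buf[ptr];            /* Slide the window: add the newest sample, drop the oldest. */
    buf[ptr] = datum;
    if (++ptr == WINDOW_WIDTH)
        ptr = 0;
    return (int) (sum/WINDOW_WIDTH);    /* Average of the last WINDOW_WIDTH samples. */
}

Called once per sample after rectification, the output of such a stage rises whenever sustained QRS-band energy is present, which is the "lump" the header comment describes.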
void lpc10_analyse(lpc10_encode_state_t *s, float speech[], int32_t voice[], int32_t *pitch, float *rms, float rc[])
{
    static const int32_t tau[60] =
    {
         20,  21,  22,  23,  24,  25,  26,  27,  28,  29,
         30,  31,  32,  33,  34,  35,  36,  37,  38,  39,
         40,  42,  44,  46,  48,  50,  52,  54,  56,  58,
         60,  62,  64,  66,  68,  70,  72,  74,  76,  78,
         80,  84,  88,  92,  96, 100, 104, 108, 112, 116,
        120, 124, 128, 132, 136, 140, 144, 148, 152, 156
    };
    static const int32_t buflim[4] =
    {
        181, 720, 25, 720
    };
    static const float precoef = 0.9375f;
    float amdf[60];
    float abuf[156];
    float ivrc[2];
    float temp;
    float phi[100]      /* was [10][10] */;
    float psi[10];
    int32_t half;
    int32_t midx;
    int32_t ewin[3][2];
    int32_t i;
    int32_t j;
    int32_t lanal;
    int32_t ipitch;
    int32_t mintau;
    int32_t minptr;
    int32_t maxptr;

    /* Calculations are done on future frame due to requirements of the pitch
       tracker. Delay RMS and RC's 2 frames to give current frame parameters on
       return. */
    for (i = 0;  i <= 720 - LPC10_SAMPLES_PER_FRAME - 181;  i++)
    {
        s->inbuf[i] = s->inbuf[LPC10_SAMPLES_PER_FRAME + i];
        s->pebuf[i] = s->pebuf[LPC10_SAMPLES_PER_FRAME + i];
    }
    for (i = 0;  i <= 540 - LPC10_SAMPLES_PER_FRAME - 229;  i++)
        s->ivbuf[i] = s->ivbuf[LPC10_SAMPLES_PER_FRAME + i];
    for (i = 0;  i <= 720 - LPC10_SAMPLES_PER_FRAME - 25;  i++)
        s->lpbuf[i] = s->lpbuf[LPC10_SAMPLES_PER_FRAME + i];
    for (i = 0, j = 0;  i < s->osptr - 1;  i++)
    {
        if (s->osbuf[i] > LPC10_SAMPLES_PER_FRAME)
            s->osbuf[j++] = s->osbuf[i] - LPC10_SAMPLES_PER_FRAME;
    }
    s->osptr = j + 1;
    s->voibuf[0][0] = s->voibuf[1][0];
    s->voibuf[0][1] = s->voibuf[1][1];
    for (i = 0;  i < 2;  i++)
    {
        s->vwin[i][0] = s->vwin[i + 1][0] - LPC10_SAMPLES_PER_FRAME;
        s->vwin[i][1] = s->vwin[i + 1][1] - LPC10_SAMPLES_PER_FRAME;
        s->awin[i][0] = s->awin[i + 1][0] - LPC10_SAMPLES_PER_FRAME;
        s->awin[i][1] = s->awin[i + 1][1] - LPC10_SAMPLES_PER_FRAME;
        s->obound[i] = s->obound[i + 1];
        s->voibuf[i + 1][0] = s->voibuf[i + 2][0];
        s->voibuf[i + 1][1] = s->voibuf[i + 2][1];
        s->rmsbuf[i] = s->rmsbuf[i + 1];
        for (j = 0;  j < LPC10_ORDER;  j++)
            s->rcbuf[i][j] = s->rcbuf[i + 1][j];
    }
    /* If the average value in the frame was over 1/4096 (after current BIAS
       correction), then subtract that much more from samples in the next frame.
       If the average value in the frame was under -1/4096, add 1/4096 more to
       samples in next frame. In all other cases, keep BIAS the same. */
    temp = 0.0f;
    for (i = 0;  i < LPC10_SAMPLES_PER_FRAME;  i++)
    {
        s->inbuf[720 - 2*LPC10_SAMPLES_PER_FRAME + i] = speech[i]*4096.0f - s->bias;
        temp += s->inbuf[720 - 2*LPC10_SAMPLES_PER_FRAME + i];
    }
    if (temp > (float) LPC10_SAMPLES_PER_FRAME)
        s->bias++;
    else if (temp < (float) (-LPC10_SAMPLES_PER_FRAME))
        s->bias--;
    /* Place voicing window */
    i = 721 - LPC10_SAMPLES_PER_FRAME;
    s->zpre = preemp(&s->inbuf[i - 181], &s->pebuf[i - 181], LPC10_SAMPLES_PER_FRAME, precoef, s->zpre);
    onset(s, s->pebuf, s->osbuf, &s->osptr, 10, 181, 720, LPC10_SAMPLES_PER_FRAME);
    lpc10_placev(s->osbuf, &s->osptr, 10, &s->obound[2], s->vwin, 3, LPC10_SAMPLES_PER_FRAME, 90, 156, 307, 462);
    /* The Pitch Extraction algorithm estimates the pitch for a frame of speech
       by locating the minimum of the average magnitude difference function
       (AMDF). The AMDF operates on low-pass, inverse filtered speech. (The
       low-pass filter is an 800 Hz, 19 tap, equiripple, FIR filter and the
       inverse filter is a 2nd-order LPC filter.) The pitch estimate is later
       refined by dynamic tracking. However, since some of the tracking
       parameters are a function of the voicing decisions, a voicing decision
       must precede the final pitch estimation. */
    /* See subroutines LPFILT, IVFILT, and eval_highres_amdf. */
    /* LPFILT reads indices LBUFH-LFRAME-29 = 511 through LBUFH = 720 of INBUF,
       and writes indices LBUFH+1-LFRAME = 541 through LBUFH = 720 of LPBUF. */
    lpfilt(&s->inbuf[228], &s->lpbuf[384], 312, LPC10_SAMPLES_PER_FRAME);
    /* IVFILT reads indices (PWINH-LFRAME-7) = 353 through PWINH = 540 of LPBUF,
       and writes indices (PWINH-LFRAME+1) = 361 through PWINH = 540 of IVBUF. */
    ivfilt(&s->lpbuf[204], s->ivbuf, 312, LPC10_SAMPLES_PER_FRAME, ivrc);
    /* eval_highres_amdf reads indices PWINL = 229 through
       (PWINL-1)+MAXWIN+(TAU(LTAU)-TAU(1))/2 = 452 of IVBUF,
       and writes indices 1 through LTAU = 60 of AMDF. */
    eval_highres_amdf(s->ivbuf, 156, tau, 60, amdf, &minptr, &maxptr, &mintau);
    /* Voicing decisions are made for each half frame of input speech. An
       initial voicing classification is made for each half of the analysis
       frame, and the voicing decisions for the present frame are finalized.
       See subroutine VOICIN. */
    /* The voicing detector (VOICIN) classifies the input signal as unvoiced
       (including silence) or voiced using the AMDF windowed
       maximum-to-minimum ratio, the zero crossing rate, energy measures,
       reflection coefficients, and prediction gains. */
    /* The pitch and voicing rules apply smoothing and isolated corrections to
       the pitch and voicing estimates and, in the process, introduce two
       frames of delay into the corrected pitch estimates and voicing
       decisions. */
    for (half = 0;  half < 2;  half++)
    {
        lpc10_voicing(s, &s->vwin[2][0], s->inbuf, s->lpbuf, buflim, half, &amdf[minptr], &amdf[maxptr], &mintau, ivrc, s->obound);
    }
    /* Find the minimum cost pitch decision over several frames, given the
       current voicing decision and the AMDF array */
    minptr++;
    dynamic_pitch_tracking(s, amdf, 60, &minptr, s->voibuf[3][1], pitch, &midx);
    ipitch = tau[midx - 1];
    /* Place spectrum analysis and energy windows */
    lpc10_placea(&ipitch, s->voibuf, &s->obound[2], 3, s->vwin, s->awin, ewin, LPC10_SAMPLES_PER_FRAME, 156);
    /* Remove short term DC bias over the analysis window. */
    lanal = s->awin[2][1] + 1 - s->awin[2][0];
    remove_dc_bias(&s->pebuf[s->awin[2][0] - 181], lanal, abuf);
    /* Compute RMS over integer number of pitch periods within the analysis
       window. */
    /* Note that in a hardware implementation this computation may be
       simplified by using diagonal elements of phi computed by mload(). */
    s->rmsbuf[2] = energyf(&abuf[ewin[2][0] - s->awin[2][0]], ewin[2][1] - ewin[2][0] + 1);
    /* Matrix load and invert, check RC's for stability */
    mload(LPC10_ORDER, 1, lanal, abuf, phi, psi);
    invert(LPC10_ORDER, phi, psi, &s->rcbuf[2][0]);
    rcchk(LPC10_ORDER, &s->rcbuf[1][0], &s->rcbuf[2][0]);
    /* Set return parameters */
    voice[0] = s->voibuf[1][0];
    voice[1] = s->voibuf[1][1];
    *rms = s->rmsbuf[0];
    for (i = 0;  i < LPC10_ORDER;  i++)
        rc[i] = s->rcbuf[0][i];
}
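The comments above describe the AMDF-based pitch search over the candidate lags in tau[]. The sketch below computes a plain AMDF over such a lag set and returns the index of the deepest minimum, purely to illustrate the idea. It is not eval_highres_amdf(); the names amdf_sketch, window_len, and min_lag_index are invented here, and the real routine's windowing, normalisation, and min/max bookkeeping are more involved.

#include <math.h>
#include <stdint.h>

/* Illustrative AMDF over a set of candidate lags; not eval_highres_amdf().
   speech[] must hold at least window_len + tau[num_lags - 1] samples. */
static void amdf_sketch(const float speech[], int window_len,
                        const int32_t tau[], int num_lags,
                        float amdf[], int *min_lag_index)
{
    int i;
    int k;
    float sum;

    *min_lag_index = 0;
    for (k = 0;  k < num_lags;  k++)
    {
        /* Average |s[i] - s[i + tau[k]]| over the analysis window. A lag near
           the true pitch period produces a deep minimum. */
        sum = 0.0f;
        for (i = 0;  i < window_len;  i++)
            sum += fabsf(speech[i] - speech[i + tau[k]]);
        amdf[k] = sum/(float) window_len;
        if (amdf[k] < amdf[*min_lag_index])
            *min_lag_index = k;
    }
}

A caller would then take tau[min_lag_index] as the raw pitch period estimate, which lpc10_analyse() refines through the voicing decisions and dynamic_pitch_tracking() before returning a final pitch value.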