/*
 * E_MAIN_init
 *
 * Parameters:
 *    spe_state   I/O: pointer to state structure pointer; set to the newly
 *                     allocated coder state on success, NULL on failure
 *
 * Function:
 *    Initialisation of variables for the coder section.
 *    Memory allocation.
 *
 * Returns:
 *    0 on success, -1 if memory allocation fails
 *    (NOTE(review): this header previously said "void", but the function
 *    actually reports an error code — callers should check it.)
 */
Word16 E_MAIN_init(void **spe_state)
{
   Coder_State *st;

   *spe_state = NULL;

   /* allocate memory */
   if ((st = (Coder_State *) malloc(sizeof(Coder_State))) == NULL)
   {
      return(-1);
   }

   /* sub-states are allocated/initialised by the E_DTX_* routines below */
   st->vadSt = NULL;
   st->dtx_encSt = NULL;

   E_DTX_init(&(st->dtx_encSt));
   E_DTX_vad_init(&(st->vadSt));

   /* reset coder state; second argument presumably selects a full reset —
      confirm against E_MAIN_reset */
   E_MAIN_reset((void *) st, 1);

   *spe_state = (void*)st;

   return(0);
}
/* * E_MAIN_encode * * Parameters: * mode I: used mode * input_sp I: 320 new speech samples (at 16 kHz) * prms O: output parameters * spe_state B: state structure * allow_dtx I: DTX ON/OFF * * Function: * Main coder routine. * * Returns: * void */ Word16 E_MAIN_encode(Word16 * mode, Word16 speech16k[], Word16 prms[], void *spe_state, Word16 allow_dtx) { /* Float32 */ Float32 f_speech16k[L_FRAME16k]; /* Speech vector */ Float32 f_old_exc[(L_FRAME + 1) + PIT_MAX + L_INTERPOL]; /* Excitation vector */ Float32 f_exc2[L_FRAME]; /* excitation vector */ Float32 error[M + L_SUBFR]; /* error of quantization */ Float32 A[NB_SUBFR * (M + 1)]; /* A(z) unquantized for the 4 subframes */ Float32 Aq[NB_SUBFR * (M + 1)]; /* A(z) quantized for the 4 subframes */ Float32 xn[L_SUBFR]; /* Target vector for pitch search */ Float32 xn2[L_SUBFR]; /* Target vector for codebook search */ Float32 dn[L_SUBFR]; /* Correlation between xn2 and h1 */ Float32 cn[L_SUBFR]; /* Target vector in residual domain */ Float32 h1[L_SUBFR]; /* Impulse response vector */ Float32 f_code[L_SUBFR]; /* Fixed codebook excitation */ Float32 y1[L_SUBFR]; /* Filtered adaptive excitation */ Float32 y2[L_SUBFR]; /* Filtered adaptive excitation */ Float32 synth[L_SUBFR]; /* 12.8kHz synthesis vector */ Float32 r[M + 1]; /* Autocorrelations of windowed speech */ Float32 Ap[M + 1]; /* A(z) with spectral expansion */ Float32 ispnew[M]; /* immittance spectral pairs at 4nd sfr */ Float32 isf[M]; /* ISF (frequency domain) at 4nd sfr */ Float32 g_coeff[5], g_coeff2[2]; /* Correlations */ Float32 gain_pit; Float32 f_tmp, gain1, gain2; Float32 stab_fac = 0.0F, fac; Float32 *new_speech, *speech; /* Speech vector */ Float32 *wsp; /* Weighted speech vector */ Float32 *f_exc; /* Excitation vector */ Float32 *p_A, *p_Aq; /* ptr to A(z) for the 4 subframes */ Float32 *f_pt_tmp; /* Word32 */ Word32 indice[8]; /* quantization indices */ Word32 vad_flag, clip_gain; Word32 T_op, T_op2, T0, T0_frac; Word32 T0_min, T0_max; Word32 
voice_fac, Q_new = 0; Word32 L_gain_code, l_tmp; Word32 i, i_subfr, pit_flag; /* Word16 */ Word16 exc2[L_FRAME]; /* excitation vector */ Word16 s_Aq[NB_SUBFR * (M + 1)]; /* A(z) quantized for the 4 subframes */ Word16 s_code[L_SUBFR]; /* Fixed codebook excitation */ Word16 ispnew_q[M]; /* quantized ISPs at 4nd subframe */ Word16 isfq[M]; /* quantized ISPs */ Word16 select, codec_mode; Word16 index; Word16 s_gain_pit, gain_code; Word16 s_tmp, s_max; Word16 corr_gain; Word16 *exc; /* Excitation vector */ /* Other */ Coder_State *st; /* Coder states */ /* Memory Usage eval */ st = (Coder_State *)spe_state; codec_mode = *mode; /* * Initialize pointers to speech vector. * * * |-------|-------|-------|-------|-------|-------| * past sp sf1 sf2 sf3 sf4 L_NEXT * <------- Total speech buffer (L_TOTAL) ------> * old_speech * <------- LPC analysis window (L_WINDOW) ------> * <-- present frame (L_FRAME) ----> * | <----- new speech (L_FRAME) ----> * | | * speech | * new_speech */ new_speech = st->mem_speech + L_TOTAL - L_FRAME - L_FILT + 460; /* New speech */ speech = st->mem_speech + L_TOTAL - L_FRAME - L_NEXT; /* Present frame */ exc = st->mem_exc + PIT_MAX + L_INTERPOL; f_exc = f_old_exc + PIT_MAX + L_INTERPOL; wsp = st->mem_wsp + (PIT_MAX / OPL_DECIM); for(i = 0; i < L_FRAME16k; i++) { f_speech16k[i] = (Float32)speech16k[i]; } Q_new = -st->mem_q; for(i = 0; i < (PIT_MAX + L_INTERPOL); i++) { f_old_exc[i] = (Float32)(st->mem_exc[i] * pow(2, Q_new)); } /* * Down sampling signal from 16kHz to 12.8kHz */ E_UTIL_decim_12k8(f_speech16k, L_FRAME16k, new_speech, st->mem_decim); /* decimate with zero-padding to avoid delay of filter */ memcpy(f_code, st->mem_decim, 2 * L_FILT16k * sizeof(Float32)); memset(error, 0, L_FILT16k * sizeof(Float32)); E_UTIL_decim_12k8(error, L_FILT16k, new_speech + L_FRAME, f_code); /* * Perform 50Hz HP filtering of input signal. 
* Perform fixed preemphasis through 1 - g z^-1 */ E_UTIL_hp50_12k8(new_speech, L_FRAME, st->mem_sig_in); memcpy(f_code, st->mem_sig_in, 4 * sizeof(Float32) ); E_UTIL_hp50_12k8(new_speech + L_FRAME, L_FILT, f_code); E_UTIL_f_preemph(new_speech, PREEMPH_FAC, L_FRAME, &(st->mem_preemph)); /* last L_FILT samples for autocorrelation window */ f_tmp = st->mem_preemph; E_UTIL_f_preemph(new_speech + L_FRAME, PREEMPH_FAC, L_FILT, &f_tmp); /* * Call VAD * Preemphesis scale down signal in low frequency and keep dynamic in HF. * Vad work slightly in future (new_speech = speech + L_NEXT - L_FILT). */ vad_flag = E_DTX_vad(st->vadSt, speech + L_NEXT - L_FILT); if (vad_flag == 0) { st->mem_vad_hist = 1; } else { st->mem_vad_hist = 0; } /* DTX processing */ if (allow_dtx) { /* Note that mode may change here */ E_DTX_tx_handler(st->dtx_encSt, vad_flag, mode); } else { E_DTX_reset(st->dtx_encSt); } if(*mode != MRDTX) { E_MAIN_parm_store(vad_flag, &prms); } /* * Perform LPC analysis * -------------------- * - autocorrelation + lag windowing * - Levinson-durbin algorithm to find a[] * - convert a[] to isp[] * - convert isp[] to isf[] for quantization * - quantize and code the isf[] * - convert isf[] to isp[] for interpolation * - find the interpolated isps and convert to a[] for the 4 subframes */ /* LP analysis centered at 3nd surame */ E_UTIL_autocorr(st->mem_speech, r); E_LPC_lag_wind(r + 1, M); /* Lagindowing */ E_LPC_lev_dur(A, r, M); E_LPC_a_isp_conversion(A, ispnew, st->mem_isp, M); /* From A(z) to isp */ /* Find the interpolated isps and convert to a[] for all subframes */ E_LPC_f_int_isp_find(st->mem_isp, ispnew, A, NB_SUBFR, M); /* update isp memory for the next frame */ memcpy(st->mem_isp, ispnew, M * sizeof(Float32)); /* Convert isps to frequency domain 0..6400 */ E_LPC_isp_isf_conversion(ispnew, isf, M); /* check resonance for pitch clipping algorithm */ E_GAIN_clip_isf_test(isf, st->mem_gp_clip); /* * Perform PITCH_OL analysis * ------------------------- * - Find the 
residual res[] for the whole speech frame * - Find the weighted input speech wsp[] for the whole speech frame * - Find the 2 open-loop pitch estimate * - Set the range for searching closed-loop pitch in 1st subframe */ p_A = A; for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR) { E_LPC_a_weight(p_A, Ap, GAMMA1, M); E_UTIL_residu(Ap, &speech[i_subfr], &wsp[i_subfr], L_SUBFR); p_A += (M + 1); } E_UTIL_deemph(wsp, TILT_FAC, L_FRAME, &(st->mem_wsp_df)); /* decimation of wsp[] to search pitch in LF and to reduce complexity */ E_GAIN_lp_decim2(wsp, L_FRAME, st->mem_decim2); /* Find open loop pitch lag for whole speech frame */ if (*mode == MODE_7k) { /* Find open loop pitch lag for whole speech frame */ T_op = E_GAIN_open_loop_search(wsp, PIT_MIN / OPL_DECIM, PIT_MAX / OPL_DECIM, L_FRAME / OPL_DECIM, st->mem_T0_med, &(st->mem_ol_gain), st->mem_hf_wsp, st->mem_hp_wsp, st->mem_ol_wght_flg); } else { /* Find open loop pitch lag for first 1/2 frame */ T_op = E_GAIN_open_loop_search(wsp, PIT_MIN / OPL_DECIM, PIT_MAX / OPL_DECIM, (L_FRAME / 2) / OPL_DECIM, st->mem_T0_med, &(st->mem_ol_gain), st->mem_hf_wsp, st->mem_hp_wsp, st->mem_ol_wght_flg); } if (st->mem_ol_gain > 0.6) { st->mem_T0_med = E_GAIN_olag_median(T_op, st->mem_ol_lag); st->mem_ada_w = 1.0F; } else { st->mem_ada_w = st->mem_ada_w * 0.9F; } if (st->mem_ada_w < 0.8) { st->mem_ol_wght_flg = 0; } else { st->mem_ol_wght_flg = 1; } E_DTX_pitch_tone_detection(st->vadSt, st->mem_ol_gain); T_op *= OPL_DECIM; if (*mode != MODE_7k) { /* Find open loop pitch lag for second 1/2 frame */ T_op2 = E_GAIN_open_loop_search(wsp + ((L_FRAME / 2) / OPL_DECIM), PIT_MIN / OPL_DECIM, PIT_MAX / OPL_DECIM, (L_FRAME / 2) / OPL_DECIM, st->mem_T0_med, &st->mem_ol_gain, st->mem_hf_wsp, st->mem_hp_wsp, st->mem_ol_wght_flg); if (st->mem_ol_gain > 0.6) { st->mem_T0_med = E_GAIN_olag_median(T_op2, st->mem_ol_lag); st->mem_ada_w = 1.0F; } else { st->mem_ada_w = st->mem_ada_w * 0.9F; } if (st->mem_ada_w < 0.8) { st->mem_ol_wght_flg = 0; } else 
{ st->mem_ol_wght_flg = 1; } E_DTX_pitch_tone_detection(st->vadSt, st->mem_ol_gain); T_op2 *= OPL_DECIM; } else { T_op2 = T_op; } /* * DTX-CNG */ if(*mode == MRDTX) { /* Buffer isf's and energy */ E_UTIL_residu(&A[3 * (M + 1)], speech, f_exc, L_FRAME); f_tmp = 0.0; for(i = 0; i < L_FRAME; i++) { f_tmp += f_exc[i] * f_exc[i]; } E_DTX_buffer(st->dtx_encSt, isf, f_tmp, codec_mode); /* Quantize and code the isfs */ E_DTX_exe(st->dtx_encSt, f_exc2, &prms); /* reset speech coder memories */ E_MAIN_reset(st, 0); /* * Update signal for next frame. * -> save past of speech[] and wsp[]. */ memcpy(st->mem_speech, &st->mem_speech[L_FRAME], (L_TOTAL - L_FRAME + 460) * sizeof(Float32)); memcpy(st->mem_wsp, &st->mem_wsp[L_FRAME / OPL_DECIM], (PIT_MAX / OPL_DECIM) * sizeof(Float32)); return(0); } /* * ACELP */ /* Quantize and code the isfs */ if (*mode <= MODE_7k) { E_LPC_isf_2s3s_quantise(isf, isfq, st->mem_isf_q, indice, 4); E_MAIN_parm_store((Word16)indice[0], &prms); E_MAIN_parm_store((Word16)indice[1], &prms); E_MAIN_parm_store((Word16)indice[2], &prms); E_MAIN_parm_store((Word16)indice[3], &prms); E_MAIN_parm_store((Word16)indice[4], &prms); } else { E_LPC_isf_2s5s_quantise(isf, isfq, st->mem_isf_q, indice, 4); E_MAIN_parm_store((Word16)indice[0], &prms); E_MAIN_parm_store((Word16)indice[1], &prms); E_MAIN_parm_store((Word16)indice[2], &prms); E_MAIN_parm_store((Word16)indice[3], &prms); E_MAIN_parm_store((Word16)indice[4], &prms); E_MAIN_parm_store((Word16)indice[5], &prms); E_MAIN_parm_store((Word16)indice[6], &prms); } /* Convert isfs to the cosine domain */ E_LPC_isf_isp_conversion(isfq, ispnew_q, M); if (*mode == MODE_24k) { /* Check stability on isf : distance between old isf and current isf */ f_tmp = 0.0F; f_pt_tmp = st->mem_isf; for (i=0; i < M - 1; i++) { f_tmp += (isf[i] - f_pt_tmp[i]) * (isf[i] - f_pt_tmp[i]); } stab_fac = (Float32)(1.25F - (f_tmp / 400000.0F)); if (stab_fac > 1.0F) { stab_fac = 1.0F; } if (stab_fac < 0.0F) { stab_fac = 0.0F; } memcpy(f_pt_tmp, 
isf, M * sizeof(Float32)); } if (st->mem_first_frame == 1) { st->mem_first_frame = 0; memcpy(st->mem_isp_q, ispnew_q, M * sizeof(Word16)); } /* Find the interpolated isps and convert to a[] for all subframes */ E_LPC_int_isp_find(st->mem_isp_q, ispnew_q, E_ROM_interpol_frac, s_Aq); for (i = 0; i < (NB_SUBFR * (M + 1)); i++) { Aq[i] = s_Aq[i] * 0.000244140625F; /* 1/4096 */ } /* update isp memory for the next frame */ memcpy(st->mem_isp_q, ispnew_q, M * sizeof(Word16)); /* * Find the best interpolation for quantized ISPs */ p_Aq = Aq; for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR) { E_UTIL_residu(p_Aq, &speech[i_subfr], &f_exc[i_subfr], L_SUBFR); p_Aq += (M + 1); } /* Buffer isf's and energy for dtx on non-speech frame */ if(vad_flag == 0) { f_tmp = 0.0F; for(i = 0; i < L_FRAME; i++) { f_tmp += f_exc[i] * f_exc[i]; } E_DTX_buffer(st->dtx_encSt, isf, f_tmp, codec_mode); } /* range for closed loop pitch search in 1st subframe */ T0_min = T_op - 8; if (T0_min < PIT_MIN) { T0_min = PIT_MIN; } T0_max = T0_min + 15; if (T0_max > PIT_MAX) { T0_max = PIT_MAX; T0_min = T0_max - 15; } /* * Loop for every subframe in the analysis frame * --------------------------------------------- * To find the pitch and innovation parameters. The subframe size is * L_SUBFR and the loop is repeated L_FRAME/L_SUBFR times. * - compute the target signal for pitch search * - compute impulse response of weighted synthesis filter (h1[]) * - find the closed-loop pitch parameters * - encode the pitch dealy * - find 2 lt prediction (with / without LP filter for lt pred) * - find 2 pitch gains and choose the best lt prediction. * - find target vector for codebook search * - update the impulse response h1[] for codebook search * - correlation between target vector and impulse response * - codebook search and encoding * - VQ of pitch and codebook gains * - find voicing factor and tilt of code for next subframe. 
* - update states of weighting filter * - find excitation and synthesis speech */ p_A = A; p_Aq = Aq; for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR) { pit_flag = i_subfr; if ((i_subfr == (2 * L_SUBFR)) & (*mode > MODE_7k)) { pit_flag = 0; /* range for closed loop pitch search in 3rd subframe */ T0_min = T_op2 - 8; if (T0_min < PIT_MIN) { T0_min = PIT_MIN; } T0_max = T0_min + 15; if (T0_max > PIT_MAX) { T0_max = PIT_MAX; T0_min = T0_max - 15; } } /* * * Find the target vector for pitch search: * --------------------------------------- * * |------| res[n] * speech[n]---| A(z) |-------- * |------| | |--------| error[n] |------| * zero -- (-)--| 1/A(z) |-----------| W(z) |-- target * exc |--------| |------| * * Instead of subtracting the zero-input response of filters from * the weighted input speech, the above configuration is used to * compute the target vector. * */ for (i = 0; i < M; i++) { error[i] = (Float32)(speech[i + i_subfr - 16] - st->mem_syn[i]); } E_UTIL_residu(p_Aq, &speech[i_subfr], &f_exc[i_subfr], L_SUBFR); E_UTIL_synthesis(p_Aq, &f_exc[i_subfr], error + M, L_SUBFR, error, 0); E_LPC_a_weight(p_A, Ap, GAMMA1, M); E_UTIL_residu(Ap, error + M, xn, L_SUBFR); E_UTIL_deemph(xn, TILT_FAC, L_SUBFR, &(st->mem_w0)); /* * Find target in residual domain (cn[]) for innovation search. 
*/ /* first half: xn[] --> cn[] */ memset(f_code, 0, M * sizeof(Float32)); memcpy(f_code + M, xn, (L_SUBFR / 2) * sizeof(Float32)); f_tmp = 0.0F; E_UTIL_f_preemph(f_code + M, TILT_FAC, L_SUBFR / 2, &f_tmp); E_LPC_a_weight(p_A, Ap, GAMMA1, M); E_UTIL_synthesis(Ap, f_code + M, f_code + M, L_SUBFR / 2, f_code, 0); E_UTIL_residu(p_Aq, f_code + M, cn, L_SUBFR / 2); /* second half: res[] --> cn[] (approximated and faster) */ for(i = (L_SUBFR / 2); i < L_SUBFR; i++) { cn[i] = f_exc[i_subfr + i]; } /* * Compute impulse response, h1[], of weighted synthesis filter */ E_LPC_a_weight(p_A, Ap, GAMMA1, M); memset(h1, 0, L_SUBFR * sizeof(Float32)); memcpy(h1, Ap, (M + 1) * sizeof(Float32)); E_UTIL_synthesis(p_Aq, h1, h1, L_SUBFR, h1 + (M + 1), 0); f_tmp = 0.0; E_UTIL_deemph(h1, TILT_FAC, L_SUBFR, &f_tmp); /* * Closed-loop fractional pitch search */ /* find closed loop fractional pitch lag */ if (*mode <= MODE_9k) { T0 = E_GAIN_closed_loop_search(&f_exc[i_subfr], xn, h1, T0_min, T0_max, &T0_frac, pit_flag, PIT_MIN, PIT_FR1_8b); /* encode pitch lag */ if (pit_flag == 0) /* if 1st/3rd subframe */ { /* * The pitch range for the 1st/3rd subframe is encoded with * 8 bits and is divided as follows: * PIT_MIN to PIT_FR1-1 resolution 1/2 (frac = 0 or 2) * PIT_FR1 to PIT_MAX resolution 1 (frac = 0) */ if (T0 < PIT_FR1_8b) { index = (Word16)(T0 * 2 + (T0_frac >> 1) - (PIT_MIN * 2)); } else { index = (Word16)(T0 - PIT_FR1_8b + ((PIT_FR1_8b - PIT_MIN) * 2)); } E_MAIN_parm_store(index, &prms); /* find T0_min and T0_max for subframe 2 and 4 */ T0_min = T0 - 8; if (T0_min < PIT_MIN) { T0_min = PIT_MIN; } T0_max = T0_min + 15; if (T0_max > PIT_MAX) { T0_max = PIT_MAX; T0_min = T0_max - 15; } }
/*
 * E_IF_encode
 *
 * Parameters:
 *    st       I: pointer to interface state structure
 *    req_mode I: requested speech mode
 *    speech   I: input speech frame (L_FRAME16k samples, 14-bit input;
 *                the 2 LSBs are cleared before encoding)
 *    serial   O: output octet structure IF2 or 16-bit serial stream
 *    dtx      I: DTX ON/OFF
 *
 * Function:
 *    Encoding and packing one frame of speech.
 *
 * Returns:
 *    number of octets
 */
int E_IF_encode(void *st, Word16 req_mode, Word16 *speech, UWord8 *serial,
                Word16 dtx)
{
   Word16 prms[NB_PARM_MAX];
   Word16 frame_type, mode;
   Word32 n;
   WB_enc_if_state *s = (WB_enc_if_state *)st;

   mode = req_mode;

   if (E_IF_homing_frame_test(speech))
   {
      /* homing frame detected at encoder input:
         reset the encoder and emit the homing parameter set */
      E_MAIN_reset(s->encoder_state, 1);
      E_IF_sid_sync_reset(s);
      E_IF_homing_coding(prms, mode);
      frame_type = TX_SPEECH;
   }
   else
   {
      /* delete the 2 LSBs (14-bit input) */
      for (n = 0; n < L_FRAME16k; n++)
      {
         speech[n] = (Word16)(speech[n] & 0xfffC);
      }

      E_MAIN_encode(&mode, speech, prms, s->encoder_state, dtx);

      if (mode != MRDTX)
      {
         /* active speech: restart the SID update countdown */
         frame_type = TX_SPEECH;
         s->sid_update_counter = 8;
      }
      else
      {
         s->sid_update_counter--;

         if (s->prev_ft == TX_SPEECH)
         {
            /* first comfort-noise frame after speech */
            frame_type = TX_SID_FIRST;
            s->sid_update_counter = 3;
         }
         else if ((s->sid_handover_debt > 0) &&
                  (s->sid_update_counter > 2))
         {
            /* ensure extra updates are properly delayed after
               a possible SID_FIRST */
            frame_type = TX_SID_UPDATE;
            s->sid_handover_debt--;
         }
         else if (s->sid_update_counter == 0)
         {
            /* periodic SID update */
            frame_type = TX_SID_UPDATE;
            s->sid_update_counter = 8;
         }
         else
         {
            /* between SID updates: transmit nothing */
            frame_type = TX_NO_DATA;
            mode = MRNO_DATA;
         }
      }

      s->prev_ft = frame_type;
   }

#ifdef IF2
   return E_IF_if2_conversion(mode, prms, serial, frame_type, req_mode);
#else
   return E_IF_mms_conversion(mode, prms, serial, frame_type, req_mode);
#endif
}