// Decode a buffer of demodulated varicode bits into ASCII bytes.
// Returns a reference to the member buffer holding the decoded characters;
// the buffer is sized to exactly the number of characters produced.
QByteArray &VariCodePipeDecoder::Decode(QVector<short> &bits)
{
    // Reserve one byte more than the bit count: varicode_decode misbehaves
    // when told it may emit only a single output char (possible upstream bug,
    // so we always offer it at least two slots).
    const int maxOutputChars = bits.size() + 1;
    decodedbytes.resize(maxOutputChars);

    const int charsDecoded = varicode_decode(&varicode_dec_states,
                                             decodedbytes.data(),
                                             bits.data(),
                                             maxOutputChars,
                                             bits.size());

    // Shrink to the actual decoded length before handing the buffer back.
    decodedbytes.resize(charsDecoded);
    return decodedbytes;
}
/*
 * Run the FDMDV demodulator and codec2 decoder over whatever modem samples
 * have accumulated in input_buf[], appending any decoded speech to
 * output_buf[].  Drives a small sync state machine (g_state) and pushes
 * stats/spectrum/sync updates to the Java side via the jni_* helpers.
 *
 * Relies on file-scope state: g_nin, g_state, g_avmag, fdmdv, codec2,
 * stats, count, g_varicode_dec_states.
 */
void per_frame_rx_processing(short  output_buf[],  /* output buf of decoded speech samples          */
                             int   *n_output_buf,  /* how many samples currently in output_buf[]    */
                             int    codec_bits[],  /* current frame of bits for decoder             */
                             short  input_buf[],   /* input buf of modem samples input to demod     */
                             int   *n_input_buf    /* how many samples currently in input_buf[]     */
                             )
{
    int             sync_bit;
    COMP            rx_fdm[FDMDV_MAX_SAMPLES_PER_FRAME];
    int             rx_bits[FDMDV_BITS_PER_FRAME];
    unsigned char   packed_bits[BYTES_PER_CODEC_FRAME];
    float           rx_spec[FDMDV_NSPEC];
    int             i, nin_prev, bit, byte;
    int             next_state;

    assert(*n_input_buf <= (2*FDMDV_NOM_SAMPLES_PER_FRAME));

    /*
      This while loop will run the demod 0, 1 (nominal) or 2 times:

      0: when tx sample clock runs faster than rx, occasionally we
         will run out of samples

      1: normal, run decoder once, every 2nd frame output a frame of
         speech samples to D/A

      2: when tx sample clock runs slower than rx, occasionally we
         will have enough samples to run demod twice.

      With a +/- 10 Hz sample clock difference at FS=8000Hz (+/- 1250
      ppm), case 0 or 1 occured about once every 30 seconds.  This is
      no problem for the decoded audio.
    */
    while(*n_input_buf >= g_nin) {

        // demod per frame processing: convert 16-bit PCM to normalized
        // complex baseband (imag = 0, real input)
        for(i=0; i<g_nin; i++) {
            rx_fdm[i].real = (float)input_buf[i]/FDMDV_SCALE;
            rx_fdm[i].imag = 0.0;
        }
        // fdmdv_demod updates g_nin with how many samples it wants next
        // time (timing recovery), so remember how many it consumed now
        nin_prev = g_nin;
        fdmdv_demod(fdmdv, rx_bits, &sync_bit, rx_fdm, &g_nin);
        *n_input_buf -= nin_prev;
        assert(*n_input_buf >= 0);

        // shift input buffer: drop the consumed samples from the front
        for(i=0; i<*n_input_buf; i++)
            input_buf[i] = input_buf[i+nin_prev];

        // compute rx spectrum & get demod stats, and update GUI plot data
        fdmdv_get_rx_spectrum(fdmdv, rx_spec, rx_fdm, nin_prev);

        // Average rx spectrum data using a simple IIR low pass filter
        for(i = 0; i < FDMDV_NSPEC; i++) {
            g_avmag[i] = BETA * g_avmag[i] + (1.0 - BETA) * rx_spec[i];
        }

        fdmdv_get_demod_stats(fdmdv, &stats);
        jni_update_stats(&stats, g_avmag);
        count++;

        /*
          State machine to:

          + Mute decoded audio when out of sync.  The demod is synced
            when we are using the fine freq estimate and SNR is above
            a thresh.

          + Decode codec bits only if we have a 0,1 sync bit sequence.
            Collects two frames of demod bits to decode one frame of
            codec bits.

          States: 0 = out of sync (mute), 1 = in sync, expecting first
          half frame (sync_bit==0), 2 = expecting second half frame
          (sync_bit==1).
        */
        next_state = g_state;
        switch (g_state) {
        case 0:
            /* mute output audio when out of sync: pad with N8 zero
               samples per demod frame, but never overrun the 2-frame
               output buffer */
            if (*n_output_buf < 2*codec2_samples_per_frame(codec2) - N8) {
                for(i=0; i<N8; i++)
                    output_buf[*n_output_buf + i] = 0;
                *n_output_buf += N8;
            }
            if (!(*n_output_buf <= (2*codec2_samples_per_frame(codec2)))) {
                LOGE("*n_output_buf <= (2*codec2_samples_per_frame(codec2))");
            }

            // SNR gate intentionally disabled; sync on fine freq est only
            if ((stats.fest_coarse_fine == 1))// && (stats.snr_est > 3.0))
                next_state = 1;
            break;
        case 1:
            if (sync_bit == 0) {
                next_state = 2;

                /* first half of frame of codec bits */
                memcpy(codec_bits, rx_bits, FDMDV_BITS_PER_FRAME*sizeof(int));
            }
            else
                next_state = 1;

            if (stats.fest_coarse_fine == 0)
                next_state = 0;
            break;
        case 2:
            next_state = 1;

            if (stats.fest_coarse_fine == 0)
                next_state = 0;

            if (sync_bit == 1) {
                /* second half of frame of codec bits */
                memcpy(&codec_bits[FDMDV_BITS_PER_FRAME], rx_bits, FDMDV_BITS_PER_FRAME*sizeof(int));

                // extract data bit: one codec bit per frame is stolen for
                // a varicode text side channel
                int data_flag_index = codec2_get_spare_bit_index(codec2);
                assert(data_flag_index != -1); // not supported for all rates

                short abit = codec_bits[data_flag_index];
                char  ascii_out;
                int   n_ascii = varicode_decode(&g_varicode_dec_states, &ascii_out, &abit, 1, 1);
                assert((n_ascii == 0) || (n_ascii == 1));
                if (n_ascii) {
                    short ashort = ascii_out;
                    LOGD("%c", ashort);
                }

                // reconstruct missing bit we steal for data bit and decode
                // speech
                codec2_rebuild_spare_bit(codec2, codec_bits);

                /* pack bits, MSB received first */
                bit = 7;
                byte = 0;
                memset(packed_bits, 0, BYTES_PER_CODEC_FRAME);
                for(i=0; i<BITS_PER_CODEC_FRAME; i++) {
                    packed_bits[byte] |= (codec_bits[i] << bit);
                    bit--;
                    if (bit < 0) {
                        bit = 7;
                        byte++;
                    }
                }
                // NOTE(review): holds only if BITS_PER_CODEC_FRAME is a
                // multiple of 8 — true for the configured codec2 mode
                assert(byte == BYTES_PER_CODEC_FRAME);

                /* add decoded speech to end of output buffer, only if a
                   full codec frame of samples still fits */
                if (*n_output_buf <= codec2_samples_per_frame(codec2)) {
                    codec2_decode(codec2, &output_buf[*n_output_buf], packed_bits);
                    *n_output_buf += codec2_samples_per_frame(codec2);
                }
                assert(*n_output_buf <= (2*codec2_samples_per_frame(codec2)));
            }
            break;
        }

        // notify Java side only on sync <-> no-sync transitions
        // (any nonzero state counts as "in sync")
        if (!!g_state != !!next_state) {
            jni_update_sync(g_state == 0);
        }
        g_state = next_state;
    }
}