void map_punctured<libbase::vector, dbl>::dotransform(const array1vd_t& pin,
      array1vd_t& pout) const
   {
   // Sanity checks: the input must be one table per pattern position, and
   // each table must hold q likelihood values.
   assertalways(pin.size() == pattern.size());
   assertalways(pin(0).size() == Base::q);
   // final matrix size depends on the number of set positions
   libbase::allocate(pout, This::output_block_size(), Base::q);
   // puncture: keep only the likelihood tables at the pattern's set positions
   int k = 0;
   for (int i = 0; i < pin.size(); i++)
      {
      if (pattern(i))
         pout(k++) = pin(i);
      }
   }
template <class GF_q, class real>
void sum_prod_alg_abstract<GF_q, real>::compute_probs(array1vd_t& ro)
   {
   // Make sure the output vector has one entry per variable node.
   ro.init(this->length_n);
   const int num_of_elements = GF_q::elements();
   for (int n = 0; n < this->length_n; n++)
      {
      // Start from the channel probabilities for symbol n ...
      ro(n) = this->received_probs(n);
      const int size_of_M_n = this->M_n(n).size();
      for (int e = 0; e < num_of_elements; e++)
         {
         // ... and multiply in the extrinsic r_mxn value from every check
         // that this symbol participates in.
         for (int m = 0; m < size_of_M_n; m++)
            {
            const int pos_m = this->M_n(n)(m) - 1; //we count from 0
            ro(n)(e) *= this->marginal_probs(pos_m, n).r_mxn(e);
            }
         //Use appropriate clipping method
         perform_clipping(ro(n)(e));
         }
      //Note the following step is not strictly necessary apart from making
      //the result look neater - however it only adds a small overhead
      //normalise the result so that q_n_0+q_n_1=1
      const real a_n = ro(n).sum();
      assertalways(a_n!=real(0.0));
      ro(n) /= a_n;
      }
   }
void sum_prod_alg_trad<GF_q, real>::spa_init(const array1vd_t& recvd_probs)
   {
   const int num_of_elements = GF_q::elements();
   // Copy the channel probabilities into received_probs, clipping away any
   // zero values and renormalising each symbol's distribution in one pass.
   this->received_probs.init(recvd_probs.size());
   for (int n = 0; n < this->length_n; n++)
      {
      this->received_probs(n).init(num_of_elements);
      real alpha = real(0.0);
      for (int e = 0; e < num_of_elements; e++)
         {
         real tmp_prob = recvd_probs(n)(e);
         //Clipping HACK
         perform_clipping(tmp_prob);
         this->received_probs(n)(e) = tmp_prob;
         alpha += tmp_prob;
         }
      assertalways(alpha!=real(0.0));
      this->received_probs(n) /= alpha;
      }
   // Initialise the marginals following the description of the algorithm
   // given by MacKay in Information Theory, Inference and Learning
   // Algorithms (2003), page 560 - chapter 47.3:
   // simply set q_mxn(0)=P_n(0)=P(x_n=0) and q_mxn(1)=P_n(1)=P(x_n=1)
   for (int m = 0; m < this->dim_m; m++)
      {
      const int non_zeros = this->N_m(m).size();
      for (int n = 0; n < non_zeros; n++)
         {
         const int pos = this->N_m(m)(n) - 1; //we count from zero
         this->marginal_probs(m, pos).q_mxn = this->received_probs(pos);
         this->marginal_probs(m, pos).r_mxn.init(num_of_elements);
         this->marginal_probs(m, pos).r_mxn = 0.0;
         }
      }
#if DEBUG>=2
   libbase::trace << " Memory Usage:\n ";
   libbase::trace << this->marginal_probs.size()
         * sizeof(sum_prod_alg_abstract<GF_q,real>::marginals)
         / double(1 << 20) << " MB" << std::endl;
   libbase::trace << std::endl << "The marginal matrix is given by:"
         << std::endl;
   this->print_marginal_probs(libbase::trace);
#endif
   }
void map_dividing<libbase::vector, dbl, dbl2>::doinverse(const array1vd_t& pin,
      array1vd_t& pout) const
   {
   // The input sequence must contain exactly one entry per output symbol.
   assertalways(pin.size() == This::output_block_size());
   // Delegate the inverse transform: aggregate modulation-symbol
   // probabilities back into encoder-symbol probabilities.
   libbase::symbol_converter<dbl,dbl2> converter(Base::M, Base::q);
   converter.aggregate_probabilities(pin, pout);
   }
void map_punctured<libbase::vector, dbl>::doinverse(const array1vd_t& pin,
      array1vd_t& pout) const
   {
   // Sanity checks: input length must match the punctured block size, and
   // each table must hold M likelihood values.
   assertalways(pin.size() == This::output_block_size());
   assertalways(pin(0).size() == Base::M);
   // final matrix size depends on the number of set positions
   libbase::allocate(pout, pattern.size(), Base::M);
   // invert the puncturing: re-insert the received tables at set positions
   // and fill punctured (unset) positions with a uniform distribution
   int k = 0;
   for (int i = 0; i < pattern.size(); i++)
      {
      if (pattern(i))
         pout(i) = pin(k++);
      else
         pout(i) = dbl(1.0 / Base::M);
      }
   }
template <class GF_q, class real>
void sum_prod_alg_abstract<GF_q, real>::spa_iteration(array1vd_t& ro)
   {
   //carry out the horizontal step
   //this uses the description of the algorithm as given by
   //MacKay in Information Theory, Inference and Learning Algorithms(2003)
   //on page 560 - chapter 47.3
   // r_mxn(0)=\sum_{x_n'|n'\in N(m)\n'} ( P(z_m=0|x_n=0) * \prod_{n'\in N(m)\n}q_mxn(x_{n') )
   // Essentially, what we are doing is the following:
   // Assume x_n=0
   // we need to sum over all possibilities that such that the parity check is satisfied, ie =0
   // if the parity check is satisfied the conditional probability is 1 and 0 otherwise
   // so we are simply adding up the products for which the parity check is satisfied.

   //the number of symbols in N_m, eg the number of variables that participate in check m
   int size_N_m;
   //loop over all check nodes - the horizontal step
   for (int loop_m = 0; loop_m < this->dim_m; loop_m++)
      {
      // get the bits that participate in this check
      size_N_m = this->N_m(loop_m).size();
      for (int loop_n = 0; loop_n < size_N_m; loop_n++)
         {
         //this will compute the relevant r_nms fixing the x_n given by loop_n
         this->compute_r_mn(loop_m, loop_n, this->N_m(loop_m));
         }
      }
#if DEBUG>=2
   libbase::trace << std::endl
         << "After the horizontal step, the marginal matrix at col x is given by:"
         << std::endl;
   this->print_marginal_probs(3, libbase::trace);
#endif
   //this array holds the checks that use symbol n
   array1i_t M_n;
   //the number of checks in that array
   int size_M_n;
   //loop over all the bit nodes - the vertical step
   for (int loop_n = 0; loop_n < this->length_n; loop_n++)
      {
      M_n = this->M_n(loop_n);
      // FIX: was "M_n.size().length()" - use plain size() for consistency
      // with the horizontal step above and with compute_probs(), which both
      // assign size() directly to an int.
      size_M_n = M_n.size();
      for (int loop_m = 0; loop_m < size_M_n; loop_m++)
         {
         this->compute_q_mn(loop_m, loop_n, M_n);
         }
      }
#if DEBUG>=2
   libbase::trace
         << "After the vertical step, the marginal matrix at col x is given by:"
         << std::endl;
   this->print_marginal_probs(3, libbase::trace);
#endif
   //compute the new probabilities for all symbols given the information in this iteration.
   //This will be used in a tentative decoding to see whether we have found a codeword
   this->compute_probs(ro);
#if DEBUG>=3
   libbase::trace << "The newly computed normalised probabilities are given by:"
         << std::endl;
   ro.serialize(libbase::trace, ' ');
#endif
   }