Example #1
void prof_burst::updateresults(libbase::vector<double>& result,
      const libbase::vector<int>& source, const libbase::vector<int>& decoded) const
   {
   assert(source.size() == get_symbolsperblock());
   assert(decoded.size() == get_symbolsperblock());
   // Update the relevant count for every symbol in error
   // Check the first symbol first
   assert(source(0) != fsm::tail);
   if (source(0) != decoded(0))
      result(0)++;
   // For each remaining symbol
   for (int t = 1; t < get_symbolsperblock(); t++)
      {
      if (source(t - 1) != decoded(t - 1))
         result(3)++;
      assert(source(t) != fsm::tail);
      if (source(t) != decoded(t))
         {
         // Keep separate counts, depending on whether the previous symbol was in error
         if (source(t - 1) != decoded(t - 1))
            result(2)++;
         else
            result(1)++;
         }
      }
   }
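Read directly from the code above, the result slots are: result(0) counts errors in the first symbol, result(1) errors whose previous symbol was correct, result(2) errors whose previous symbol was also in error, and result(3) symbols (from the second onwards) whose previous symbol was in error. A minimal caller sketch, inside a suitable function and assuming a hypothetical prof_burst instance named profiler with matching source and decoded vectors:

   // Hypothetical caller (sketch): four slots, one per counter described above
   libbase::vector<double> result;
   result.init(4);
   result = 0;
   profiler.updateresults(result, source, decoded);
   // Conditional probability of an error given that the previous symbol was in error
   const double p_cond = (result(3) > 0) ? result(2) / result(3) : 0.0;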
Example #2
void readnextblock(std::istream& sin, libbase::vector<S>& result,
      int blocklength, const libbase::size_type<libbase::vector>& txsize)
   {
   if (blocklength > 0)
      result.init(blocklength);
   else
      result.init(txsize);
   result.serialize(sin);
   }
Example #3
void experiment_normal::accumulate_state(const libbase::vector<double>& state)
   {
   assert(state.size() > 0);
   // divide state into constituent components and accumulate
   const int n = state.size() / 2;
   assert(state.size() == 2 * n);
   safe_accumulate(sum, state.extract(0, n));
   safe_accumulate(sumsq, state.extract(n, n));
   }
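safe_accumulate itself does not appear in these examples. A minimal sketch of what such a helper could look like, assuming it simply copies the incoming vector on first use and accumulates element-wise afterwards (the library's actual implementation may differ):

static void safe_accumulate(libbase::vector<double>& acc,
      const libbase::vector<double>& in)
   {
   // First call: take a copy of the incoming vector (sketch only)
   if (acc.size() == 0)
      acc = in;
   else
      {
      // Subsequent calls: element-wise accumulation
      assert(acc.size() == in.size());
      const int n = acc.size();
      for (int i = 0; i < n; i++)
         acc(i) += in(i);
      }
   }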
Example #4
void lut_interleaver<real>::transform(const libbase::vector<int>& in,
      libbase::vector<int>& out) const
   {
   const int tau = lut.size();
   assertalways(in.size() == tau);
   out.init(in.size());
   for (int t = 0; t < tau; t++)
      if (lut(t) == fsm::tail)
         out(t) = fsm::tail;
      else
         out(t) = in(lut(t));
   }
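The transform above is a plain table lookup, with tail symbols passed through unchanged. A self-contained sketch of the same lookup, inside a suitable function and using a hypothetical reversal permutation as the table:

   // Hypothetical example (sketch): reverse an 8-symbol block with the same lookup
   const int n = 8;
   libbase::vector<int> in_example, lut_example, out_example;
   in_example.init(n);
   lut_example.init(n);
   out_example.init(n);
   for (int t = 0; t < n; t++)
      {
      in_example(t) = t;
      lut_example(t) = n - 1 - t;
      }
   for (int t = 0; t < n; t++)
      out_example(t) = in_example(lut_example(t)); // yields 7, 6, ..., 0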
Example #5
void readsingleblock(std::istream& sin, libbase::vector<S>& result,
      int blocklength)
   {
   std::list<S> items;
   // Skip any preceding comments and whitespace
   sin >> libbase::eatcomments;
   // Repeat until end of stream
   while (!sin.eof())
      {
      S x;
      sin >> x >> libbase::eatcomments;
      assertalways(sin.good() || sin.eof());
      items.push_back(x);
      }
   std::cerr << "Read block of length = " << items.size() << std::endl;
   // truncate if necessary
   if (blocklength > 0 && blocklength < int(items.size()))
      {
      items.resize(blocklength);
      std::cerr << "Truncated to length = " << items.size() << std::endl;
      }
   // copy to required object type
   result.init(items.size());
   typename std::list<S>::iterator i;
   int j;
   for (i = items.begin(), j = 0; i != items.end(); i++, j++)
      result(j) = *i;
   }
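A minimal usage sketch for the two readers above, inside a suitable function and assuming a hypothetical file of whitespace-separated integer symbols; the element type S is deduced from the result vector:

   // Hypothetical usage (sketch); requires <fstream>
   std::ifstream file("symbols.txt");
   libbase::vector<int> rx;
   // Read the whole stream as one block, truncating to at most 1024 symbols
   readsingleblock(file, rx, 1024);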
Example #6
void experiment_binomial::derived_accumulate(
      const libbase::vector<double>& result)
   {
   assert(result.size() > 0);
   // accumulate results
   safe_accumulate(sum, result);
   }
Example #7
void experiment_binomial::estimate(libbase::vector<double>& estimate,
      libbase::vector<double>& stderror) const
   {
   assert(count() == sum.size());
   // initialize space for results
   estimate.init(count());
   stderror.init(count());
   // compute results
   assert(get_samplecount() > 0);
   for (int i = 0; i < count(); i++)
      {
      // estimate is the proportion
      estimate(i) = sum(i) / double(get_samplecount(i));
      // standard error is sqrt(p(1-p)/n)
      stderror(i) = sqrt((estimate(i) * (1 - estimate(i)))
            / double(get_samplecount(i)));
      }
   }
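As a worked check of the formula above: an error proportion of p = 0.01 estimated over 10^6 samples has a standard error of sqrt(0.01 * 0.99 / 10^6), roughly 1e-4, i.e. about 1% of the estimate itself.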
Example #8
void experiment_normal::derived_accumulate(
      const libbase::vector<double>& result)
   {
   assert(result.size() > 0);
   // accumulate results
   libbase::vector<double> sample = result;
   safe_accumulate(sum, sample);
   sample.apply(square);
   safe_accumulate(sumsq, sample);
   }
Example #9
void experiment_normal::get_state(libbase::vector<double>& state) const
   {
   assert(count() == sum.size());
   assert(count() == sumsq.size());
   state.init(2 * count());
   for (int i = 0; i < count(); i++)
      {
      state(i) = sum(i);
      state(count() + i) = sumsq(i);
      }
   }
Example #10
void ccbfsm::reset(const libbase::vector<int>& state)
   {
   fsm::reset(state);
   assert(state.size() == nu);
   reg = 0;
   int j = 0;
   // The state vector lists the register bits interleaved across the k inputs,
   // one bit position at a time; registers shorter than the current position
   // are skipped.
   for (int t = 0; t < nu; t++)
      for (int i = 0; i < k; i++)
         if (reg(i).size() > t)
            reg(i) |= bitfield(state(j++) << t, reg(i).size());
   assert(j == nu);
   }
Example #11
void experiment_normal::estimate(libbase::vector<double>& estimate,
      libbase::vector<double>& stderror) const
   {
   assert(count() == sum.size());
   assert(count() == sumsq.size());
   // estimate is the mean value
   assert(get_samplecount() > 0);
   estimate = sum / double(get_samplecount());
   // standard error is sigma/sqrt(n)
   stderror.init(count());
   if (get_samplecount() > 1)
      for (int i = 0; i < count(); i++)
         stderror(i) = sqrt((sumsq(i) / double(get_samplecount()) - estimate(i)
               * estimate(i)) / double(get_samplecount() - 1));
   else
      stderror = std::numeric_limits<double>::max();
   }
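As a worked check of the formula above: if after n = 100 samples the running mean is 2.0 and the mean of squares is 5.0, the variance term is 5.0 - 2.0 * 2.0 = 1.0 and the standard error is sqrt(1.0 / 99), roughly 0.1.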
Example #12
void experiment_binomial::accumulate_state(const libbase::vector<double>& state)
   {
   assert(state.size() > 0);
   // accumulate results from saved state
   safe_accumulate(sum, state);
   }
Example #13
void commsys_stream_simulator<S, R>::sample(libbase::vector<double>& result)
   {
   do
      {
      // Advance by one frame
      received_prev = received_this;
      source_this = source_next;
      received_this = received_next;
      // Create next source frame
      source_next = Base::createsource();
      // Encode -> Map -> Modulate next frame
      libbase::vector<S> transmitted = sys_tx->encode_path(source_next);
      // Transmit next frame
      received_next = sys_tx->transmit(transmitted);
#ifndef NDEBUG
      // update counters
      frames_encoded++;
#endif
      } while (source_this.size() == 0);

   // Shorthand for transmitted and received frame sizes
   const int tau = this->sys->output_block_size();
   const int rho = received_this.size();

   // Get access to the commsys channel object in stream-oriented mode
   const bsid& c = dynamic_cast<const bsid&> (*this->sys->getchan());
   // Determine start-of-frame and end-of-frame probabilities
   libbase::vector<double> sof_prior;
   libbase::vector<double> eof_prior;
   libbase::size_type<libbase::vector> offset;
   if (eof_post.size() == 0) // this is the first frame
      {
      // Initialize as drift pdf after transmitting one frame
      c.get_drift_pdf(tau, eof_prior, offset);
      eof_prior /= eof_prior.max();
      // Initialize as zero-drift is assured
      sof_prior.init(eof_prior.size());
      sof_prior = 0;
      sof_prior(0 + offset) = 1;
      // Initialize previous frame so we have something to copy
      received_prev.init(offset);
      received_prev = 0; // value not important as content is unused
      }
   else
      {
      // Use previous (centralized) end-of-frame posterior probability
      sof_prior = eof_post;
      // Initialize as drift pdf after transmitting one frame, given sof priors
      c.get_drift_pdf(tau, sof_prior, eof_prior, offset);
      eof_prior /= eof_prior.max();
      }

   // Assemble stream
   libbase::vector<S> stream = concatenate(received_prev, received_this,
         received_next);
   // Extract received vector
   const int start = received_prev.size() - offset + drift_error;
   const int length = tau + eof_prior.size() - 1;
   assertalways(start >= 0 && start <= stream.size());
   assertalways(length >= 0 && length <= stream.size() - start);
   libbase::vector<S> received = stream.extract(start, length);

   // Get access to the commsys object in stream-oriented mode
   commsys_stream<S>& s = dynamic_cast<commsys_stream<S>&> (*this->sys);
   // Demodulate -> Inverse Map -> Translate
   s.receive_path(received, sof_prior, eof_prior, offset);
   // Store posterior end-of-frame drift probabilities
   eof_post = s.get_eof_post();
#ifndef NDEBUG
   // update counters
   frames_decoded++;
#endif

   // Determine estimated drift
   int drift;
   eof_post.max(drift);
   drift -= offset;
   // Centralize posterior probabilities
   eof_post = 0;
   const int sh_a = std::max(0, -drift);
   const int sh_b = std::max(0, drift);
   const int sh_n = eof_post.size() - abs(drift);
   eof_post.segment(sh_a, sh_n) = s.get_eof_post().extract(sh_b, sh_n);
   // Determine actual cumulative drift and error in drift estimation
   cumulative_drift += rho - tau;
   drift_error += drift - (rho - tau);
#ifndef NDEBUG
   std::cerr << "DEBUG (commsys_stream_simulator): Actual acc. drift at sof = "
         << cumulative_drift - (rho - tau) << std::endl;
   std::cerr << "DEBUG (commsys_stream_simulator): Actual frame drift = "
         << rho - tau << std::endl;
   std::cerr << "DEBUG (commsys_stream_simulator): Actual acc. drift at eof = "
         << cumulative_drift << std::endl;
   std::cerr << "DEBUG (commsys_stream_simulator): Acc. drift error at sof = "
         << drift_error - (drift - (rho - tau)) << std::endl;
   std::cerr << "DEBUG (commsys_stream_simulator): Estimated frame drift = "
         << drift << std::endl;
   std::cerr << "DEBUG (commsys_stream_simulator): Acc. drift error at eof = "
         << drift_error << std::endl;
   std::cerr << "DEBUG (commsys_stream_simulator): Frames encoded = "
         << frames_encoded << std::endl;
   std::cerr << "DEBUG (commsys_stream_simulator): Frames decoded = "
         << frames_decoded << std::endl;
#endif

   // Initialise result vector
   result.init(Base::count());
   result = 0;
   // For every iteration
   libbase::vector<int> decoded;
   for (int i = 0; i < this->sys->num_iter(); i++)
      {
      // Decode & update results
      this->sys->decode(decoded);
      R::updateresults(result, i, source_this, decoded);
      }
   // Keep record of what we last simulated
   this->last_event = concatenate(source_this, decoded);
   }