Example #1
// Computes the binomial coefficient C(n, s) = n! / (s! * (n-s)!),
// cancelling the larger factorial in the numerator so the intermediate
// products stay small.
double combination(unsigned n, unsigned s){
	double total;
	if((n-s) > s){
		// s < n - s: C(n, s) = (n * (n-1) * ... * (n-s+1)) / s!
		total = ((double) products(n,n-s))/products(s,0);
	}
	else{
		// n - s <= s: cancel s! and divide by (n-s)! instead.
		total = ((double) products(n,s))/products(n-s,0);
	}
	return total;
}
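Example #1 relies on a products(hi, lo) helper that is not shown. A minimal sketch, assuming it returns the product of the integers from lo+1 through hi (the (double) cast at the call site suggests an integer return type):

// Hypothetical helper assumed by combination() above: the product of
// the integers lo+1, lo+2, ..., hi.
unsigned long long products(unsigned hi, unsigned lo) {
	unsigned long long result = 1;
	for (unsigned k = lo + 1; k <= hi; ++k)
		result *= k;
	return result;
}

With this definition, combination(5, 2) evaluates products(5, 3) / products(2, 0) = 20 / 2 = 10, which matches C(5, 2).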
Example #2
template<typename KernelType, typename TreeType>
inline force_inline
double FastMKSRules<KernelType, TreeType>::BaseCase(
    const size_t queryIndex,
    const size_t referenceIndex)
{
    // Score() always happens before BaseCase() for a given node combination.  For
    // cover trees, the kernel evaluation between the two centroid points already
    // happened.  So we don't need to do it.  Note that this optimizes out if the
    // first conditional is false (its result is known at compile time).
    if (tree::TreeTraits<TreeType>::FirstPointIsCentroid)
    {
        if ((queryIndex == lastQueryIndex) &&
                (referenceIndex == lastReferenceIndex))
            return lastKernel;

        // Store new values.
        lastQueryIndex = queryIndex;
        lastReferenceIndex = referenceIndex;
    }

    ++baseCases;
    double kernelEval = kernel.Evaluate(querySet.unsafe_col(queryIndex),
                                        referenceSet.unsafe_col(referenceIndex));

    // Update the last kernel value, if we need to.
    if (tree::TreeTraits<TreeType>::FirstPointIsCentroid)
        lastKernel = kernelEval;

    // If the reference and query sets are identical, we still need to compute the
    // base case (so that things can be bounded properly), but we won't add it to
    // the results.
    if ((&querySet == &referenceSet) && (queryIndex == referenceIndex))
        return kernelEval;

    // If this candidate can't beat the current k-th best kernel value,
    // don't insert it into the list.
    if (kernelEval < products(products.n_rows - 1, queryIndex))
        return kernelEval;

    size_t insertPosition = 0;
    for ( ; insertPosition < products.n_rows; ++insertPosition)
        if (kernelEval >= products(insertPosition, queryIndex))
            break;

    InsertNeighbor(queryIndex, insertPosition, referenceIndex, kernelEval);

    return kernelEval;
}
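BaseCase() treats KernelType as anything exposing Evaluate() over two Armadillo column vectors. A minimal stand-in, assuming a plain linear (dot product) kernel; the real mlpack kernels follow this shape:

#include <armadillo>

// Minimal stand-in for the KernelType interface BaseCase() assumes:
// Evaluate() maps two column vectors to a similarity score, and larger
// values mean better max-kernel candidates.
struct LinearKernel {
  double Evaluate(const arma::vec& a, const arma::vec& b) const {
    return arma::dot(a, b);
  }
};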
Example #3
template<typename KernelType, typename TreeType>
double FastMKSRules<KernelType, TreeType>::Rescore(const size_t queryIndex,
        TreeType& /*referenceNode*/,
        const double oldScore) const
{
    const double bestKernel = products(products.n_rows - 1, queryIndex);

    return ((1.0 / oldScore) > bestKernel) ? oldScore : DBL_MAX;
}
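Rescore() inverts oldScore because Score() (Example #14 below) returns 1 / maxKernel, so smaller scores mark more promising nodes. A toy check of that convention:

#include <cassert>

// Toy check of the score convention assumed by Rescore(): scores hold
// 1 / maxKernel, so a node survives only while its cached maxKernel
// still beats the current k-th best kernel value.
int main() {
  const double maxKernel = 0.8;
  const double oldScore = 1.0 / maxKernel;
  const double bestKernel = 0.5;           // current k-th best
  assert((1.0 / oldScore) > bestKernel);   // 0.8 > 0.5: keep recursing
  return 0;
}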
Example #4
 int integerBreak(int n) {
     if(n == 1) return 1;
     vector<int> products(n+1, 0);
     products[0] = 1;
     products[1] = 1;
     maxProduct(products, n);
     return products[n];
 }
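integerBreak() defers the actual dynamic program to a maxProduct() helper that is not shown. A sketch consistent with the call, assuming products[k] ends up holding the best product obtainable by splitting k into at least two positive parts:

#include <vector>
#include <algorithm>
using std::vector;

// Hypothetical DP helper matching the call above: for each k, try every
// first part j, keeping k-j either whole or split at its own best.
void maxProduct(vector<int>& products, int n) {
    for (int k = 2; k <= n; ++k)
        for (int j = 1; j < k; ++j)
            products[k] = std::max(products[k],
                                   std::max(j * (k - j), j * products[k - j]));
}

Under this reading, integerBreak(10) walks the table up to products[10] = 36 (3 * 3 * 4).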
Example #5
void InternalCleanJob::start()
{
    try {
        ArtifactCleaner cleaner(logger(), observer());
        cleaner.cleanup(project(), products(), m_options);
    } catch (const ErrorInfo &error) {
        setError(error);
    }
    storeBuildGraph();
    emit finished(this);
}
Example #6
#include <iostream>
#include <vector>

int main(){
  std::vector<std::vector<int>> matrix;
  
  initMatrix(matrix);

  // Sanity check: the product of four adjacent entries along one diagonal.
  int toy = matrix[6][8] * matrix[7][9] * matrix[8][10] * matrix[9][11];
  std::cout << " toy example: " << toy << std::endl;

  std::cout << " the greatest product of 4 is " << products(matrix) << std::endl;

  return 0;
}
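Both initMatrix() and products() are defined elsewhere. A hedged sketch of products() consistent with the output message, scanning rows, columns, and both diagonals of a non-negative grid for the greatest product of four adjacent entries:

#include <algorithm>
#include <vector>

// Hypothetical products() matching main() above: the greatest product
// of 4 adjacent entries in any row, column, or diagonal direction.
int products(const std::vector<std::vector<int>>& m) {
  const int n = static_cast<int>(m.size());
  long long best = 0;
  const int dirs[4][2] = {{0, 1}, {1, 0}, {1, 1}, {1, -1}};
  for (int r = 0; r < n; ++r)
    for (int c = 0; c < (int)m[r].size(); ++c)
      for (const auto& d : dirs) {
        long long p = 1;
        int rr = r, cc = c, k = 0;
        for (; k < 4; ++k, rr += d[0], cc += d[1]) {
          if (rr < 0 || rr >= n || cc < 0 || cc >= (int)m[rr].size()) break;
          p *= m[rr][cc];
        }
        if (k == 4) best = std::max(best, p);  // only full runs of 4 count
      }
  return static_cast<int>(best);
}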
Example #7
  /**
   * Add a single reaction to the mechanism. This routine
   * must be called after init() and before finalize().
   * This function branches on the types of reactions allowed
   * by the InterfaceKinetics manager in order to install
   * the reaction correctly in the manager.
   * The manager allows the following reaction types:
   *  - Elementary
   *  - Surface
   *  - Global
   * There is no difference between elementary and surface
   * reactions.
   */
  void InterfaceKinetics::addReaction(const ReactionData& r) {

    /*
     * Install the rate coefficient for the current reaction
     * in the appropriate data structure.
     */
    addElementaryReaction(r);
    /*
     * Add the reactants and products for the current reaction
     * to the various stoichiometric coefficient arrays.
     */
    installReagents(r);
    /*
     * Save the reactant and product groups, which are
     * part of the ReactionData class, in this class.
     * They aren't used for anything but reaction path
     * analysis.
     */
    //installGroups(reactionNumber(), r.rgroups, r.pgroups);
    /*
     * Increase the internal number of reactions, m_ii, by one, and
     * increase the size of m_perturb by one as well.
     */
    incrementRxnCount();
    m_rxneqn.push_back(r.equation);

    m_rxnPhaseIsReactant.resize(m_ii, 0);
    m_rxnPhaseIsProduct.resize(m_ii, 0);
 
    int np = nPhases(); 
    int i = m_ii -1;
    m_rxnPhaseIsReactant[i] = new bool[np];
    m_rxnPhaseIsProduct[i] = new bool[np];

    for (int p = 0; p < np; p++) {
      m_rxnPhaseIsReactant[i][p] = false;
      m_rxnPhaseIsProduct[i][p] = false;
    }

    const vector_int& vr = reactants(i);
    for (int ik = 0; ik < (int) vr.size(); ik++) {
      int k = vr[ik];
      int p = speciesPhaseIndex(k);
      m_rxnPhaseIsReactant[i][p] = true;
    }
    const vector_int& vp = products(i);
    for (int ik = 0; ik < (int) vp.size(); ik++) {
      int k = vp[ik];
      int p = speciesPhaseIndex(k);
      m_rxnPhaseIsProduct[i][p] = true;
    }
 }
Example #8
bool Warehouse::canProvideOrder(Order *order) {
    bool canProvide = true;
    // Work on a copy of the stock so a failed check leaves the
    // warehouse's real inventory untouched.
    vector<int> products(*(this->products));
    for(int i = 0 ; i < order->products->size() ; i++) {
        products.at(order->products->at(i))--;
        if(products.at(order->products->at(i)) < 0) {
            canProvide = false;
        }
    }

    order->buffer->at(this->warehouse_ID) = canProvide;
    return canProvide;
}
Example #9
// The input is a plaintext table T[] and an array of encrypted bits
// I[], holding the binary representation of an index i into T.
// The output is the encrypted value T[i].
void tableLookup(Ctxt& out, const vector<zzX>& table, const CtPtrs& idx,
                 std::vector<zzX>* unpackSlotEncoding)
{
  FHE_TIMER_START;
  out.clear();
  vector<Ctxt> products(lsize(table), out); // to hold subset products of idx
  CtPtrs_vectorCt pWrap(products); // A wrapper

  // Compute all products of encrypted bits =: b_i
  computeAllProducts(pWrap, idx, unpackSlotEncoding);

  // Compute the sum b_i * T[i]
  NTL_EXEC_RANGE(lsize(table), first, last)
  for(long i=first; i<last; i++)
    products[i].multByConstant(table[i]); // p[i] = p[i]*T[i]
  NTL_EXEC_RANGE_END
  for(long i=0; i<lsize(table); i++)
    out += products[i];
}
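The comments above compress the whole trick: computeAllProducts turns the bits of idx into a one-hot selector b_i (b_i = 1 exactly when i equals the encoded index), so the sum of b_i * T[i] is T[idx]. A plain-integer analogue, purely for intuition:

#include <cstddef>
#include <vector>

// Cleartext analogue of tableLookup(): with a one-hot selector b_i,
// sum_i b_i * T[i] == T[idx]. Plain longs stand in for ciphertexts.
long tableLookupPlain(const std::vector<long>& table, std::size_t idx) {
  long out = 0;
  for (std::size_t i = 0; i < table.size(); ++i) {
    long b = (i == idx) ? 1 : 0;  // stands in for the encrypted product b_i
    out += b * table[i];
  }
  return out;
}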
Example #10
// A counterpart of tableLookup. The input is an encrypted table T[]
// and an array of encrypted bits I[], holding the binary representation
// of an index i into T.  This function increments by one the entry T[i].
void tableWriteIn(const CtPtrs& table, const CtPtrs& idx,
                  std::vector<zzX>* unpackSlotEncoding)
{
  FHE_TIMER_START;
  const Ctxt* ct = table.ptr2nonNull(); // find some non-null Ctxt
  long size = lsize(table);
  if (size==0) return;
  std::vector<Ctxt> products(size, Ctxt(ZeroCtxtLike, *ct));  
  CtPtrs_vectorCt pWrap(products); // A wrapper

  // Compute all products of encrypted bits =: b_i
  computeAllProducts(pWrap, idx, unpackSlotEncoding);

  // increment each entry T[i] by products[i]
  NTL_EXEC_RANGE(lsize(table), first, last)
  for(long i=first; i<last; i++)
    *table[i] += products[i];
  NTL_EXEC_RANGE_END
}
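The same one-hot selector explains tableWriteIn: adding products[i] to every entry changes only T[idx]. A cleartext analogue with plain longs:

#include <cstddef>
#include <vector>

// Cleartext analogue of tableWriteIn(): the subset products form a
// one-hot selector, so this increments exactly one entry, T[idx].
void tableWriteInPlain(std::vector<long>& table, std::size_t idx) {
  for (std::size_t i = 0; i < table.size(); ++i)
    table[i] += (i == idx) ? 1 : 0;  // stands in for the encrypted b_i
}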
Example #11
template<typename KernelType, typename TreeType>
void FastMKSRules<KernelType, TreeType>::InsertNeighbor(const size_t queryIndex,
        const size_t pos,
        const size_t neighbor,
        const double distance)
{
    // We only memmove() if there is actually a need to shift something.
    if (pos < (products.n_rows - 1))
    {
        int len = (products.n_rows - 1) - pos;
        memmove(products.colptr(queryIndex) + (pos + 1),
                products.colptr(queryIndex) + pos,
                sizeof(double) * len);
        memmove(indices.colptr(queryIndex) + (pos + 1),
                indices.colptr(queryIndex) + pos,
                sizeof(size_t) * len);
    }

    // Now put the new information in the right index.
    products(pos, queryIndex) = distance;
    indices(pos, queryIndex) = neighbor;
}
Example #12
int main( int argc, char* argv[] )
{
  if ( argc != 2 ) {
    showUsage();
    return 0;
  }

  // Start the rng.
  RandomGenerator rng( 41u );
  UniformRandomSize sizeTypeRng( rng, boost::uniform_int< std::size_t >( 0,
    std::numeric_limits< std::size_t >::max() ) );

  // Parse the input file.
  std::ifstream input( argv[1] );
  std::string line;

  // The interaction database file.
  std::string xmlDatabaseFile;
  input >> xmlDatabaseFile; getline( input, line );
  InteractionDatabase myDatabase( xmlDatabaseFile );

  // Time and time step.
  double deltaTime, endTime, outputDeltaTime;
  input >> deltaTime; getline( input, line );
  input >> endTime; getline( input, line );

  // Output info.
  input >> outputDeltaTime; getline( input, line );
  std::size_t numberOfOutputBins;
  input >> numberOfOutputBins; getline( input, line );

  // Volume of the domain.
  double cellVolumeCubicMeters;
  input >> cellVolumeCubicMeters; getline( input, line );

  // Particle species, counts, and temperatures.
  std::multimap< std::string, ParticleInputSpec > particleInitialConditions;
  while ( true ) {
    std::string speciesName;
    std::size_t particleCount;
    double particleTemperature;
    Vec3 commonVelocity;
    input >> speciesName >> particleCount >> particleTemperature >> commonVelocity;
    if ( ! input.good() ) {
      break;
    } else {
      getline( input, line );
    }

    ParticleInputSpec inputSpec = { particleCount, particleTemperature, commonVelocity };
    particleInitialConditions.insert( std::make_pair( speciesName, inputSpec ) );
  }

  input.close();

  // Add all species types to the database.
  typedef std::multimap< std::string, ParticleInputSpec >::const_iterator ConstSpecIterator;
  for ( ConstSpecIterator speciesIterator = particleInitialConditions.begin();
    speciesIterator != particleInitialConditions.end();
    speciesIterator = particleInitialConditions.upper_bound( speciesIterator->first ) ) {
    myDatabase.addParticleType( speciesIterator->first );
  }

  // Throw away all interactions except elastic reactions.
//  myDatabase.filter = boost::make_shared< chimp::interaction::filter::Elastic >();

  // Finalize the database and do housekeeping.
  myDatabase.initBinaryInteractions();
  myDatabase.xmlDb.close();

  // Now make up some particles. We use the same indexing as the particle database to ease bookkeeping.
  typedef std::vector< Particle > AllParticlesOfOneSpecies;
  std::vector< AllParticlesOfOneSpecies > speciesSets( myDatabase.getProps().size() );
  for ( int i = 0; i < myDatabase.getProps().size(); ++i ) {

    // Find the pertinent particle specs.
    std::string speciesName = myDatabase[i].name::value;

    for ( ConstSpecIterator speciesIterator = particleInitialConditions.lower_bound( speciesName );
      speciesIterator != particleInitialConditions.upper_bound( speciesName ); ++speciesIterator ) {

      const ParticleInputSpec& spec = speciesIterator->second;

      // Give the particles some random velocities plus the common velocity.
      for ( std::size_t particleIndex = 0; particleIndex < spec.count; ++particleIndex ) {
        Particle particle;
        particle.velocity =
          randomVelocityFromMaxwellian( rng, spec.temperatureInKelvin, myDatabase[i].mass::value );
        particle.velocity += spec.driftVelocity;
        speciesSets[i].push_back( particle );
      }
    }
  }

  // Write some info for the reaction set.
  std::cout << "BEGIN INTERACTION TABLE" << std::endl;
  BOOST_FOREACH( const InteractionDatabase::Set& reactionBranches, myDatabase.getInteractions() ) {

    if ( reactionBranches.rhs.size() == 0 ) continue;

    std::cout << "  Reactions of "
      << myDatabase[ reactionBranches.lhs.A.species ].name::value << " with "
      << myDatabase[ reactionBranches.lhs.B.species ].name::value << std::endl;
    BOOST_FOREACH( const InteractionDatabase::Set::Equation& eq, reactionBranches.rhs ) {
      eq.print( std::cout << "    ", myDatabase ) << std::endl;
    }
  }
  std::cout << "END  INTERACTION  TABLE" << std::endl;

  // Loop over time steps.
  const double beginTime = 0.;
  for ( double time = beginTime; time < endTime; time += deltaTime ) {

    // Update particle positions, max speeds, and temperatures.
    std::vector< double > maxSpeedMetersPerSecond;
    std::vector< double > speciesTemperatures;
    std::size_t collisionCount = 0;
    std::size_t particleCount = 0;
    for ( int speciesIndex = 0; speciesIndex < speciesSets.size(); ++speciesIndex ) {
      maxSpeedMetersPerSecond.push_back( 0. );
      speciesTemperatures.push_back( 0. );
      BOOST_FOREACH( Particle& particle, speciesSets[ speciesIndex ] ) {
        // v = v + E * dt;
        // x = x + v * dt;
        double particleSpeedSquared = magnitudeSquared( particle.velocity );
        maxSpeedMetersPerSecond.back() =
          std::max( maxSpeedMetersPerSecond.back(), sqrt( particleSpeedSquared ) );
        speciesTemperatures.back() += particleSpeedSquared;
        ++particleCount;
      }
      speciesTemperatures.back() *= 0.5 * myDatabase[ speciesIndex ].mass::value; // kinetic energy
      speciesTemperatures.back() /= 1.5 * physical::constant::si::K_B * speciesSets[ speciesIndex ].size();
    }

    if ( time == beginTime ) {
      printConsoleData( myDatabase, speciesTemperatures, 0.0, 0, particleCount, true );
    }

    // Collide a few particles. This is done by looping over all reactions, choosing a few pairs of
    // the pertinent 2 species, and doing a null-collision algorithm on these pairs.
    BOOST_FOREACH( const InteractionDatabase::Set& reactionBranches, myDatabase.getInteractions() ) {

      if ( reactionBranches.rhs.size() == 0 ) continue;

      std::vector< std::size_t > particlesPerSpecies;
      std::vector< int > reactantIndices;
      reactantIndices.push_back( reactionBranches.lhs.A.species );
      reactantIndices.push_back( reactionBranches.lhs.B.species );
      std::size_t numberOfCollisions = 1;
      BOOST_FOREACH( const int& reactantIndex, reactantIndices ) {
        particlesPerSpecies.push_back( speciesSets[ reactantIndex ].size() );
        numberOfCollisions *= particlesPerSpecies.back();
      }

      // Round the number of collisions based on the remainder using MC method.
      double maxRelativeSpeed = std::accumulate( maxSpeedMetersPerSecond.begin(),
        maxSpeedMetersPerSecond.end(), 0. );
      double maxSigmaSpeedProduct = reactionBranches.findMaxSigmaVProduct( maxRelativeSpeed );
      double fractionalCollisions =
        numberOfCollisions * maxSigmaSpeedProduct * deltaTime / cellVolumeCubicMeters;
      numberOfCollisions = static_cast< std::size_t >( std::floor( fractionalCollisions ) );
      double partialCollision = fractionalCollisions - numberOfCollisions;
      UniformRandomDouble remainderSelector( rng, boost::uniform_real<>( 0., 1. ) );
      if ( remainderSelector() < partialCollision ) {
        ++numberOfCollisions;
      }

      // Setup some dummy vectors to hold arguments.
      std::vector< InteractionDatabase::options::Particle > reactants( 2 );
      std::vector< const InteractionDatabase::options::Particle* > reactantPointers( 2 );
      for ( int i = 0; i < 2; ++i ) {
        reactantPointers[i] = &reactants[i];
      }

      // Loop over all of the chosen collision pairs.
      for ( std::size_t collisionPair = 0; collisionPair < numberOfCollisions; ++collisionPair ) {

        // Choose the particles randomly, making sure we don't pick the same particle twice if this
        // is a like-pair collision.
        boost::array< std::size_t, 2 > particleIndex;
        particleIndex[0] = sizeTypeRng() % particlesPerSpecies[0];
        particleIndex[1] = sizeTypeRng() % particlesPerSpecies[1];
        if ( reactantIndices[0] == reactantIndices[1] ) {
          while ( particleIndex[1] == particleIndex[0] ) {
            particleIndex[1] = sizeTypeRng() % particlesPerSpecies[1];
          }
        }

        // Copy over the velocities.
        for ( int i = 0; i < 2; ++i ) {
          for ( int j = 0; j < 3; ++j )
            reactants[i].v[j] =
              speciesSets[ reactantIndices[i] ][ particleIndex[i] ].velocity[j];
        }

        double relativeSpeed = magnitude( reactants[0].v - reactants[1].v );
        if ( relativeSpeed == 0.0 ) {
          continue;
        }

        // Find which branch to take using null collision method.
        std::pair< int, double > path =
          reactionBranches.calculateOutPath( maxSigmaSpeedProduct, relativeSpeed );

        // Skip it if there is no collision.
        if ( path.first < 0 ) {
          continue;
        }

        const InteractionDatabase::Set::Equation& selectedPath = reactionBranches.rhs[ path.first ];

        InteractionDatabase::Interaction::ParticleParam defaultParam;
        defaultParam.is_set = false; // because it doesn't have a default constructor yet.
        std::vector< InteractionDatabase::Interaction::ParticleParam > products( 5, defaultParam );
        selectedPath.interaction->interact( reactantPointers, products );
        ++collisionCount;

        // For elastic collisions, we just copy the velocities of the products.
        for ( int i = 0; i < 2; ++i ) {
          std::vector< Particle >& species = speciesSets[ reactantIndices[i] ];
          if ( products[i].is_set ) {
            // The original reactant was modified, but was not consumed.
            for ( int j = 0; j < 3; ++j ) {
              species[ particleIndex[i] ].velocity[j] = products[i].particle.v[j];
            }
          } else {
            // The reactant was consumed. Remove it.
            species.erase( species.begin() + particleIndex[i] );
          }
        }

        // Add the new particles if there were any created.
        for ( int i = 2; i < 5; ++i ) {
          if ( products[i].is_set ) {
            Particle newParticle;
            for ( int j = 0; j < 3; ++j ) {
              newParticle.velocity[j] = products[i].particle.v[j];
            }
            speciesSets[ selectedPath.products[i-2].species ].push_back( newParticle );
          }
        }

      } // collisionPair
    } // interaction
  } // time step

  return 0;
}
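The remainder rounding above (the "MC method") keeps the expected collision count equal to the fractional value f. A standalone sketch of just that step, using <random> in place of the Boost generators used in the example:

#include <cmath>
#include <cstddef>
#include <random>

// Monte Carlo rounding: return floor(f), plus one with probability
// equal to the remainder, so that E[result] == f.
std::size_t mcRound(double f, std::mt19937& rng) {
  std::uniform_real_distribution<double> u(0.0, 1.0);
  std::size_t n = static_cast<std::size_t>(std::floor(f));
  if (u(rng) < f - static_cast<double>(n))
    ++n;
  return n;
}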
Example #13
template<typename KernelType, typename TreeType>
double FastMKSRules<KernelType, TreeType>::CalculateBound(TreeType& queryNode)
const
{
    // We have four possible bounds -- just like NeighborSearchRules, but they are
    // slightly different in this context.
    //
    // (1) min ( min_{all points p in queryNode} P_p[k],
    //           min_{all children c in queryNode} B(c) );
    // (2) max_{all points p in queryNode} P_p[k] + (worst child distance + worst
    //           descendant distance) sqrt(K(I_p[k], I_p[k]));
    // (3) max_{all children c in queryNode} B(c) + <-- not done yet.  ignored.
    // (4) B(parent of queryNode);
    double worstPointKernel = DBL_MAX;
    double bestAdjustedPointKernel = -DBL_MAX;

    const double queryDescendantDistance = queryNode.FurthestDescendantDistance();

    // Loop over all points in this node to find the best and worst.
    for (size_t i = 0; i < queryNode.NumPoints(); ++i)
    {
        const size_t point = queryNode.Point(i);
        if (products(products.n_rows - 1, point) < worstPointKernel)
            worstPointKernel = products(products.n_rows - 1, point);

        if (products(products.n_rows - 1, point) == -DBL_MAX)
            continue; // Avoid underflow.

        // This should be (queryDescendantDistance + centroidDistance) for any tree
        // but it works for cover trees since centroidDistance = 0 for cover trees.
        const double candidateKernel = products(products.n_rows - 1, point) -
                                       queryDescendantDistance *
                                       referenceKernels[indices(indices.n_rows - 1, point)];

        if (candidateKernel > bestAdjustedPointKernel)
            bestAdjustedPointKernel = candidateKernel;
    }

    // Loop over all the children in the node.
    double worstChildKernel = DBL_MAX;

    for (size_t i = 0; i < queryNode.NumChildren(); ++i)
    {
        if (queryNode.Child(i).Stat().Bound() < worstChildKernel)
            worstChildKernel = queryNode.Child(i).Stat().Bound();
    }

    // Now assemble bound (1).
    const double firstBound = (worstPointKernel < worstChildKernel) ?
                              worstPointKernel : worstChildKernel;

    // Bound (2) is bestAdjustedPointKernel.
    const double fourthBound = (queryNode.Parent() == NULL) ? -DBL_MAX :
                               queryNode.Parent()->Stat().Bound();

    // Pick the best of these bounds.
    const double interA = (firstBound > bestAdjustedPointKernel) ? firstBound :
                          bestAdjustedPointKernel;
//  const double interA = 0.0;
    const double interB = fourthBound;

    return (interA > interB) ? interA : interB;
}
Example #14
template<typename KernelType, typename TreeType>
double FastMKSRules<KernelType, TreeType>::Score(const size_t queryIndex,
        TreeType& referenceNode)
{
    // Compare with the current best.
    const double bestKernel = products(products.n_rows - 1, queryIndex);

    // See if we can perform a parent-child prune.
    const double furthestDist = referenceNode.FurthestDescendantDistance();
    if (referenceNode.Parent() != NULL)
    {
        double maxKernelBound;
        const double parentDist = referenceNode.ParentDistance();
        const double combinedDistBound = parentDist + furthestDist;
        const double lastKernel = referenceNode.Parent()->Stat().LastKernel();
        if (kernel::KernelTraits<KernelType>::IsNormalized)
        {
            const double squaredDist = std::pow(combinedDistBound, 2.0);
            const double delta = (1 - 0.5 * squaredDist);
            if (lastKernel <= delta)
            {
                const double gamma = combinedDistBound * sqrt(1 - 0.25 * squaredDist);
                maxKernelBound = lastKernel * delta +
                                 gamma * sqrt(1 - std::pow(lastKernel, 2.0));
            }
            else
            {
                maxKernelBound = 1.0;
            }
        }
        else
        {
            maxKernelBound = lastKernel +
                             combinedDistBound * queryKernels[queryIndex];
        }

        if (maxKernelBound < bestKernel)
            return DBL_MAX;
    }

    // Calculate the maximum possible kernel value, either by calculating the
    // centroid or, if the centroid is a point, use that.
    ++scores;
    double kernelEval;
    if (tree::TreeTraits<TreeType>::FirstPointIsCentroid)
    {
        // Could it be that this kernel evaluation has already been calculated?
        if (tree::TreeTraits<TreeType>::HasSelfChildren &&
                referenceNode.Parent() != NULL &&
                referenceNode.Point(0) == referenceNode.Parent()->Point(0))
        {
            kernelEval = referenceNode.Parent()->Stat().LastKernel();
        }
        else
        {
            kernelEval = BaseCase(queryIndex, referenceNode.Point(0));
        }
    }
    else
    {
        const arma::vec queryPoint = querySet.unsafe_col(queryIndex);
        arma::vec refCentroid;
        referenceNode.Centroid(refCentroid);

        kernelEval = kernel.Evaluate(queryPoint, refCentroid);
    }

    referenceNode.Stat().LastKernel() = kernelEval;

    double maxKernel;
    if (kernel::KernelTraits<KernelType>::IsNormalized)
    {
        const double squaredDist = std::pow(furthestDist, 2.0);
        const double delta = (1 - 0.5 * squaredDist);
        if (kernelEval <= delta)
        {
            const double gamma = furthestDist * sqrt(1 - 0.25 * squaredDist);
            maxKernel = kernelEval * delta +
                        gamma * sqrt(1 - std::pow(kernelEval, 2.0));
        }
        else
        {
            maxKernel = 1.0;
        }
    }
    else
    {
        maxKernel = kernelEval + furthestDist * queryKernels[queryIndex];
    }

    // We return the inverse of the maximum kernel so that larger kernels are
    // recursed into first.
    return (maxKernel > bestKernel) ? (1.0 / maxKernel) : DBL_MAX;
}
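The normalized-kernel branch in Score() is the spherical law of cosines: for a normalized kernel the points sit on the unit sphere, kernelEval = cos(theta), and a distance bound d limits the angle change to phi, where cos(phi) = 1 - d^2/2 (delta) and sin(phi) = d * sqrt(1 - d^2/4) (gamma). A toy numeric check of that identity:

#include <cassert>
#include <cmath>

// Numeric check of the normalized-kernel bound used in Score(): with
// kernelEval = cos(theta) and kernelEval <= delta, the best reachable
// kernel value within distance d is cos(theta - phi).
int main() {
  const double kernelEval = 0.6;                           // cos(theta)
  const double d = 0.5;                                    // distance bound
  const double delta = 1.0 - 0.5 * d * d;                  // cos(phi)
  const double gamma = d * std::sqrt(1.0 - 0.25 * d * d);  // sin(phi)
  const double bound = kernelEval * delta +
      gamma * std::sqrt(1.0 - kernelEval * kernelEval);    // cos(theta - phi)
  const double direct = std::cos(std::acos(kernelEval) - std::acos(delta));
  assert(std::fabs(bound - direct) < 1e-12);               // same quantity
  return 0;
}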
Example #15
void Calibrator::xval(const char* fname)
{
    ofstream ofs(fname);
    vector<vector<vector<float > > > *d = sequences->at(0)->get_raw_template_data();
    int size = d->size();
    vector<vector<vector<int> > > guesses; // omitted period, sequence the template was omitted from, then period index of guess
    for(int omit = 0; omit != size; ++ omit)
    {
        guesses.push_back(vector<vector<int > >());
        vector<vector<float> > ntemplate(NUMBER_OF_CHANNELS, vector<float>((*d)[0][0].size(), 0.0));
        for(int seq = 0; seq != sequences->number(); ++ seq)//seq is seq to omit from
        {
            guesses.back().push_back(vector<int>(size, -1));
            d = sequences->at(seq)->get_raw_template_data();
            for(int i = 0; i != size; ++i)
            {
                if(i == omit)
                    continue;
                for(int ch = 0; ch != NUMBER_OF_CHANNELS; ++ ch)
                {
                    for(int j = 0; j!= (*d)[i][ch].size(); ++ j)
                    {
                        ntemplate[ch][j] += (*d)[i][ch][j];
                    }
                }
            }
            for(int ch = 0; ch != NUMBER_OF_CHANNELS; ++ ch)
            {
                #ifndef MONOENERGETIC_TEMPLATE
                #define MONOENERGETIC_TEMPLATE 1
                #endif
                #if MONOENERGETIC_TEMPLATE
                //Normalize
                float energy = 0.0;
                for(int i = 0; i != ntemplate[ch].size(); ++i)
                {
                    energy += ntemplate[ch][i]* ntemplate[ch][i];
                }
                float factor = sqrt(energy);
                for(int i = 0; i != ntemplate[ch].size(); ++i)
                {
                    ntemplate[ch][i] /= (factor *(PERIODS_TO_AVERAGE -1));
                }
                #else
                //don't normalize
                for(int i = 0; i != ntemplate[ch].size(); ++i)
                {
                    ntemplate[ch][i] /= (PERIODS_TO_AVERAGE -1);
                }
                #endif
            }
            vector<vector<double> > scores;

            vector<double> blank_score_row(sequences->number(), 0.0);

            for(int i = 0; i != size; ++i)
            {
                scores.push_back(blank_score_row);
                for(int seq_index2 = 0 ; seq_index2 != sequences->number(); ++ seq_index2)
                {
                    vector<vector<float > > *t;
                    vector<double> products(NUMBER_OF_CHANNELS, 0.0);
                    if(seq == seq_index2)//if this is the sequence we made a new template for
                    {
                        t = &ntemplate;
                    }
                    else
                    {
                        t = sequences->at(seq_index2)->get_template();
                    }
                    maxtrix_element_product(t->begin(), t->end(), (*d)[i].begin(), products.begin());
                    double s = accumulate(products.begin(), products.end(), double(0.0));
                    scores.back()[seq_index2] = s;
                }
                int guess = std::max_element(scores.back().begin(), scores.back().end()) - scores.back().begin();
                (guesses.back().back())[i] = guess;
            }
        }

    }

    for(int seq = 0; seq != sequences->number(); ++ seq)
    {
        vector<int> accumulated_guesses(sequences->number(), 0);
        for(int i = 0; i != size; ++i)
        {
            for(int j = 0; j!= size; ++ j)
            {
                ++accumulated_guesses[guesses[i][seq][j]];
            }
        }
        int most_guesses = max_element(accumulated_guesses.begin(), accumulated_guesses.end()) - accumulated_guesses.begin();
        ofs << "ommiting from sequence " << seq << ", correct guesses: "
        << double(accumulated_guesses[seq]) *100.0/ (size * size) << " % " << endl;

        ofs << "most guesses for sequence: " << most_guesses << " guesses: "
        << double(accumulated_guesses[most_guesses]) *100.0/ (size * size) << " %"
        << endl << endl;
    }
}
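xval() calls a maxtrix_element_product() helper (the spelling is the code's own) that is not shown. A hedged sketch consistent with the call site, accumulating one template-versus-data dot product per channel into the products vector:

#include <cstddef>
#include <vector>

// Hypothetical shape for the maxtrix_element_product() helper used in
// xval(): walk template rows [tplFirst, tplLast) alongside the data
// rows, and accumulate each channel's elementwise dot product into out.
template <typename TplIt, typename DataIt, typename OutIt>
void maxtrix_element_product(TplIt tplFirst, TplIt tplLast,
                             DataIt dataFirst, OutIt out)
{
    for (; tplFirst != tplLast; ++tplFirst, ++dataFirst, ++out)
    {
        double sum = 0.0;
        for (std::size_t j = 0; j != tplFirst->size(); ++j)
            sum += (*tplFirst)[j] * (*dataFirst)[j];
        *out += sum;
    }
}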