static inline
    void discard_clusters(Turns& turns, Clusters const& clusters,
            Geometry0 const& geometry0, Geometry1 const& geometry1)
    {
        for (typename Clusters::const_iterator cit = clusters.begin();
             cit != clusters.end(); ++cit)
        {
            signed_size_type cluster_id = cit->first;

            // If there are only self-turns in the cluster, the cluster should
            // be located within the other geometry, for intersection
            if (is_self_cluster(cluster_id, turns, clusters))
            {
                cluster_info const& cinfo = cit->second;
                if (! within(turns[*cinfo.turn_indices.begin()], geometry0, geometry1))
                {
                    // Discard all turns in cluster
                    for (std::set<signed_size_type>::const_iterator sit = cinfo.turn_indices.begin();
                         sit != cinfo.turn_indices.end(); ++sit)
                    {
                        turns[*sit].discarded = true;
                    }
                }
            }
        }
    }
	void FillBaseMetrics() {
		metrics_.original_metrics.num_clusters = original_clusters_.ClustersNumber();
		metrics_.constructed_metrics.num_clusters = constructed_clusters_.ClustersNumber();
		ComputeSingletonMetrics();
		ComputeMaxClusterMetrics();
		metrics_.used_reads = double(constructed_reads_.size()) / original_reads_.size() * 100;
	}
	bool IsOriginalClusterLost(size_t orig_cluster_ind) {
		auto reads = original_clusters_.GetReadsByCluster(orig_cluster_ind);
		for(auto read = reads.begin(); read != reads.end(); read++)
			if(constructed_clusters_.ReadExists(original_reads_[*read]))
				return false;
		return true;
	}
Example #4
void InfoModel::ModelWithCluster( InfoModel &model, Clusters &cluster )
{
	model.energy = cluster.GetEnergy();
	model.N = cluster.GetAtomsNumber();
	model.alloy = cluster.GetAlloy();
	model.alloyNum = cluster.GetAlloyNum();
}
Example #5
    void visit_clusters(Clusters const& clusters, Turns const& turns)
    {
        typedef typename boost::range_value<Turns>::type turn_type;
        int index = 0;
        BOOST_FOREACH(turn_type const& turn, turns)
        {
            if (turn.cluster_id >= 0)
            {
                std::cout << " TURN: " << index << "  part of cluster "  << turn.cluster_id << std::endl;
            }
            index++;
        }

        for (typename Clusters::const_iterator it = clusters.begin(); it != clusters.end(); ++it)
        {
            std::cout << " CLUSTER " << it->first << ": ";
            for (typename std::set<bg::signed_size_type>::const_iterator sit
                 = it->second.turn_indices.begin();
                 sit != it->second.turn_indices.end(); ++sit)
            {
                std::cout << " "  << *sit;
            }
            std::cout << std::endl;
        }

        std::cout << std::endl;

    }
Example #6
vector<Int_t> * Tracks::getConflictingTracksFromTrack(Int_t trackIdx) {
   Int_t             nClusters, nClustersReal;
   Cluster         * conflictCluster = nullptr;
   Clusters        * conflictClusters = nullptr;
   vector<Int_t>   * conflictingTracks = new vector<Int_t>;
   vector<Int_t>   * possibleTracks = nullptr;
   conflictingTracks->reserve(5);
   
   Track * trackA = At(trackIdx);
   if (!trackA) { return conflictingTracks; }

   conflictClusters = trackA->getConflictClusters();
   nClusters = conflictClusters->GetEntriesFast();
   nClustersReal = conflictClusters->GetEntries();

   for (Int_t i=0; i<nClusters; i++) {
      conflictCluster = conflictClusters->At(i);
      if (!conflictCluster) continue;

      possibleTracks = getTracksFromCluster(conflictCluster);

      for (UInt_t j=0; j<possibleTracks->size(); j++) {
         if (!isItemInVector(possibleTracks->at(j), conflictingTracks)) {
            conflictingTracks->push_back(possibleTracks->at(j));
         }
      }
   }

   return conflictingTracks;
}
	bool IsClusterNotMerged(size_t orig_cluster_ind) {
		vector<string> read_names = GetReadNamesFromCluster(orig_cluster_ind, original_clusters_, original_reads_);
		set<size_t> constr_cluster_inds;
		for(auto it = read_names.begin(); it != read_names.end(); it++)
			if(constructed_clusters_.ReadExists(*it))
				constr_cluster_inds.insert(constructed_clusters_.GetCluster(*it));
		return constr_cluster_inds.size() > 1;
	}
	ClustersEvaluator(char *original_fname, char *constructed_fname) :
		original_clusters_(original_fname),
		constructed_clusters_(constructed_fname),
		constructed_fname_(constructed_fname) {
		cout << "Extraction of original clusters from " << original_fname << endl;
		original_clusters_.ExtractFromFile();
		cout << "Extraction of constructed clusters from " << constructed_fname << endl;
		constructed_clusters_.ExtractFromFile();
	}
	void Evaluate() {
		original_reads_ = original_clusters_.Reads();
		constructed_reads_ = constructed_clusters_.Reads();

		FillBaseMetrics();
		ComputeNotMergedMetrics();
		ComputeErrorMetrics();
		ComputeLostClustersMetrics();
	}
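A minimal driver sketch for this evaluator. The command-line handling and the include are assumptions for illustration; only ClustersEvaluator(char*, char*) and Evaluate() are taken from the snippet above.

#include <iostream>

int main(int argc, char *argv[]) {
	if(argc != 3) {
		std::cout << "Usage: " << argv[0] << " <original_clusters_file> <constructed_clusters_file>" << std::endl;
		return 1;
	}
	// The constructor extracts both clusterings from their files (see above).
	ClustersEvaluator evaluator(argv[1], argv[2]);
	// Evaluate() fills the metrics_ structure with the cluster-quality metrics.
	evaluator.Evaluate();
	return 0;
}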
Example #10
Tracks * getTracksFromClusters(Int_t Runs, Int_t dataType, Int_t frameType, Float_t energy) {
   run_energy = energy;

   DataInterface   * di = new DataInterface();
   Int_t             nClusters = kEventsPerRun * 5 * nLayers;
   Int_t             nTracks = kEventsPerRun * 2;
   Bool_t            breakSignal = false;
   Clusters        * clusters = new Clusters(nClusters);
   Tracks          * tracks = new Tracks(nTracks);
   Tracks          * allTracks = new Tracks(nTracks * Runs);

   for (Int_t i=0; i<Runs; i++) {
      showDebug("Start getMCClusters\n");
      di->getMCClusters(i, clusters);

      showDebug("Finding calorimeter tracks\n");
      tracks = clusters->findCalorimeterTracksWithMCTruth();

      if (tracks->GetEntriesFast() == 0) breakSignal = kTRUE; // to stop running

      // Track improvements
      Int_t nTracksBefore = 0, nTracksAfter = 0;
      Int_t nIsInelastic = 0, nIsNotInelastic = 0;
      
      tracks->extrapolateToLayer0();
      nTracksBefore = tracks->GetEntries();
      tracks->removeTracksLeavingDetector();
      nTracksAfter = tracks->GetEntries();
      
      cout << "Of " << nTracksBefore << " tracks, " << nTracksBefore - nTracksAfter << " (" << 100* ( nTracksBefore - nTracksAfter) / ( (float) nTracksBefore ) << "%) were lost when leaving the detector.\n";
      
      tracks->removeTrackCollisions();
      // tracks->retrogradeTrackImprovement(clusters);

      tracks->Compress();
      tracks->CompressClusters();
      
      for (Int_t j=0; j<tracks->GetEntriesFast(); j++) {
         if (!tracks->At(j)) continue;

         allTracks->appendTrack(tracks->At(j));
      }

      allTracks->appendClustersWithoutTrack(clusters->getClustersWithoutTrack());

      clusters->clearClusters();
      tracks->Clear();

      if (breakSignal) break;
   }

   delete clusters;
   delete tracks;
   delete di;

   return allTracks;
}
	void ComputeConstructedSingletons() {
		for(auto it = constructed_clusters_.clusters_begin(); it != constructed_clusters_.clusters_end(); it++)
			if(constructed_clusters_.IsClusterSingleton(it->first)) {
				metrics_.constructed_metrics.num_singletons++;
				auto read_name = constructed_reads_[(it->second)[0]];
				if(original_clusters_.IsClusterSingleton(original_clusters_.GetCluster(read_name)))
					metrics_.constructed_metrics.num_corr_singletons++;
			}
		metrics_.correct_singletons = metrics_.constructed_metrics.num_corr_singletons;
	}
	void ComputeLostClustersMetrics() {
		size_t lost_cluster_size = 0;
		for(auto it = original_clusters_.clusters_begin(); it != original_clusters_.clusters_end();
				it++) {
			if(IsOriginalClusterLost(it->first)) {
				metrics_.lost_clusters_number++;
				lost_cluster_size += original_clusters_.ClusterSize(it->first);
			}
		}
		metrics_.lost_clusters_size = double(lost_cluster_size) / original_reads_.size();
	}
	void ComputeErrorMetrics() {
		metrics_.constructed_metrics.num_errors = 0;
		for(auto it = constructed_clusters_.clusters_begin(); it != constructed_clusters_.clusters_end(); it++) {
			if(!IsConstructedClusterNotErroneous(it->first))
				metrics_.constructed_metrics.num_errors++;
			else {
				size_t orig_supercluster = GetOriginalSuperCluster(it->first);
				if(constructed_clusters_.ClusterSize(it->first) > original_clusters_.ClusterSize(orig_supercluster))
					metrics_.constructed_metrics.num_errors++;
			}
		}
	}
	bool IsClusterBigAndSingletons(size_t orig_cluster_ind) {
		assert(IsClusterNotMerged(orig_cluster_ind));
		vector<string> read_names = GetReadNamesFromCluster(orig_cluster_ind, original_clusters_, original_reads_);

		set<size_t> not_trivial_clusters;
		for(auto it = read_names.begin(); it != read_names.end(); it++) {
			if(constructed_clusters_.GetClusterSizeByReadName(*it) > 1) {
				not_trivial_clusters.insert(constructed_clusters_.GetCluster(*it));
			}
		}
		return not_trivial_clusters.size() == 1;
	}
void ClusterControllerDialog::setClusters(const Clusters &  clusters)
{
    int cluster_count(clusters.clusterCount());

    m_cluster_info_table->clearContents();
    m_cluster_info_table->setColumnCount(cluster_count);

    for(int cluster_id(0); cluster_id < cluster_count; cluster_id++)
    {
        ClusterData cluster_data(clusters.getClusterData(cluster_id));
        if(cluster_data.member_count > 0)
            m_cluster_info_table->addCluster(cluster_data, cluster_id);
    }

    m_valid = true;
}
	vector<string> GetReadNamesFromCluster(size_t cluster_ind, Clusters &clusters, vector<string> &reads) {
		vector<size_t> read_inds = clusters.GetReadsByCluster(cluster_ind);
		vector<string> read_names;
		for(auto it = read_inds.begin(); it != read_inds.end(); it++)
			read_names.push_back(reads[*it]);
		return read_names;
	}
	double ComputeNotMergedFillin(size_t orig_cluster_ind) {
		vector<string> read_names = GetReadNamesFromCluster(orig_cluster_ind, original_clusters_, original_reads_);
		map<size_t, vector<size_t> > constr_subclusters;
		for(auto it = read_names.begin(); it != read_names.end(); it++) {
			size_t cluster_ind = constructed_clusters_.GetCluster(*it);
			constr_subclusters[constructed_clusters_.ClusterSize(cluster_ind)].push_back(cluster_ind);
		}

		for(auto it = constr_subclusters.rbegin(); it != constr_subclusters.rend(); it++) {
			auto clusters = it->second;
			for(auto cluster_ind = clusters.begin(); cluster_ind != clusters.end(); cluster_ind++)
				if(IsConstructedClusterNotErroneous(*cluster_ind)) {
					return double(constructed_clusters_.ClusterSize(*cluster_ind)) / original_clusters_.ClusterSize(orig_cluster_ind);
				}
		}
		return 0;
	}
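	// Restating the computation above as a formula (a transcription of the code,
	// not an independent definition): for an original cluster C_orig,
	//
	//     fillin(C_orig) = |C*| / |C_orig|,
	//
	// where C* is the largest constructed cluster that shares a read with C_orig
	// and is non-erroneous (all of its reads map back to a single original
	// cluster); fillin is 0 when no such constructed cluster exists.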
	bool IsConstructedClusterNotErroneous(size_t constr_cluster_ind) {
		vector<string> read_names = GetReadNamesFromCluster(constr_cluster_ind,
				constructed_clusters_, constructed_reads_);
		set<size_t> orig_clusters_inds;
		for(auto it = read_names.begin(); it != read_names.end(); it++)
			orig_clusters_inds.insert(original_clusters_.GetCluster(*it));
		return orig_clusters_inds.size() == 1;
	}
	size_t GetOriginalSuperCluster(size_t constr_cluster_ind) {
		vector<string> read_names = GetReadNamesFromCluster(constr_cluster_ind,
				constructed_clusters_, constructed_reads_);
		set<size_t> orig_clusters_inds;
		for(auto it = read_names.begin(); it != read_names.end(); it++)
			orig_clusters_inds.insert(original_clusters_.GetCluster(*it));
		assert(orig_clusters_inds.size() == 1);
		return *(orig_clusters_inds.begin());
	}
    static inline
    bool any_blocked(signed_size_type cluster_id,
            const Turns& turns, Clusters const& clusters)
    {
        typename Clusters::const_iterator cit = clusters.find(cluster_id);
        if (cit == clusters.end())
        {
            return false;
        }
        cluster_info const& cinfo = cit->second;
        for (std::set<signed_size_type>::const_iterator it
             = cinfo.turn_indices.begin();
             it != cinfo.turn_indices.end(); ++it)
        {
            typename boost::range_value<Turns>::type const& turn = turns[*it];
            if (turn.any_blocked())
            {
                return true;
            }
        }
        return false;
    }
    static inline
    bool is_self_cluster(signed_size_type cluster_id,
            const Turns& turns, Clusters const& clusters)
    {
        typename Clusters::const_iterator cit = clusters.find(cluster_id);
        if (cit == clusters.end())
        {
            return false;
        }

        cluster_info const& cinfo = cit->second;
        for (std::set<signed_size_type>::const_iterator it
             = cinfo.turn_indices.begin();
             it != cinfo.turn_indices.end(); ++it)
        {
            if (! is_self_turn<overlay_intersection>(turns[*it]))
            {
                return false;
            }
        }

        return true;
    }
	void ComputeNotMergedMetrics() {
		double sum_fill = 0;

		for(auto it = original_clusters_.clusters_begin(); it != original_clusters_.clusters_end(); it++) {
			size_t orig_cluster_ind = it->first;
			if(original_clusters_.IsClusterSingleton(orig_cluster_ind))
				continue;

			// for each non-trivial original cluster, check whether it was left unmerged (split across several constructed clusters)
			if(!IsClusterNotMerged(orig_cluster_ind))
				continue;

			metrics_.original_metrics.num_not_merged++;
			if(IsClusterBigAndSingletons(orig_cluster_ind))
				metrics_.original_metrics.num_nm_big_singletons++;

			sum_fill += ComputeNotMergedFillin(orig_cluster_ind);
		}
		metrics_.avg_fillin = sum_fill / metrics_.original_metrics.num_not_merged;
	}
	void ComputeMaxClusterMetrics() {
		size_t max_orig_cluster = 0;
		for(auto it = original_clusters_.clusters_begin(); it != original_clusters_.clusters_end(); it++)
			if(metrics_.original_metrics.max_cluster < original_clusters_.ClusterSize(it->first)) {
				metrics_.original_metrics.max_cluster = original_clusters_.ClusterSize(it->first);
				max_orig_cluster = it->first;
			}

		for(auto it = constructed_clusters_.clusters_begin(); it != constructed_clusters_.clusters_end(); it++) {
			metrics_.constructed_metrics.max_cluster = max<size_t>(metrics_.constructed_metrics.max_cluster,
					constructed_clusters_.ClusterSize(it->first));
		}

		metrics_.max_cluster_fillin = ComputeNotMergedFillin(max_orig_cluster);
	}
Example #24
void Cluster::recPathToCluster(RootCluster *rootCluster, Clusters currentPath)
{
    // Reset cluster-cluster overlap exceptions.
    m_cluster_cluster_overlap_exceptions.clear();
    m_nodes_replaced_with_clusters.clear();
    m_overlap_replacement_map.clear();

    // Add this cluster to the path.
    currentPath.push_back(this);

    // Recursively call recPathToCluster() on each child cluster.
    for (unsigned i = 0; i < clusters.size(); ++i) 
    {
        clusters[i]->recPathToCluster(rootCluster, currentPath);
    }

    // And store the path to each child node.
    for (std::set<unsigned>::iterator it = nodes.begin();
            it != nodes.end(); ++it)
    {
        rootCluster->m_cluster_vectors_leading_to_nodes[*it].
                push_back(currentPath);
    }
}
double GuptaPotentialEnergy::EnergyValue(Clusters& cluster)
{
	int N = cluster.GetAtomsNumber();
	_atomEnergy.resize(N);
	double *dis = cluster.GetDistancePointer();
	double E = 0;
	vector<double> VEN(N,0);
	vector<double> PEN(N,0);
	
	Alloy alloy = cluster.GetAlloy();

	for (int i = 0; i < N - 1; i ++)
	{
		for (int j = i + 1; j < N; j ++)
		{
			double r = dis[ i * N + j ];
			int note1 = cluster.GetAtomAtIndex(i).GetNote();
			int note2 = cluster.GetAtomAtIndex(j).GetNote();
			Gupta_AtomParamter parameter = (Gupta_AtomParamter&)ReturnAtomParameter(alloy[note1],alloy[note2]);
			double FMJN = r / parameter.r0 - 1;
			double FMJV = parameter.A * exp( -parameter.P * FMJN );
			double FMJP  = parameter.Xi * parameter.Xi * exp( -2 * parameter.q *FMJN );

			VEN[i] += FMJV;
			VEN[j] += FMJV;
			PEN[i] += FMJP;
			PEN[j] += FMJP;
		}
	}
	for (int i = 0; i < N; i++)
	{		
		_atomEnergy[i] = VEN[i] - sqrt(PEN[i]);
		E += _atomEnergy[i];
	}

	cluster.SetEnergyVectorOfAtoms(_atomEnergy);
	cluster.SetEnergy(E);

	return E;
}
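The pair loop above accumulates, per atom i, a repulsive sum and a many-body attractive sum, then stores E_i = repulsive - sqrt(attractive). Restated with the parameter names from the snippet (A, P as p, q, r0, Xi as ξ, all looked up per atom pair via ReturnAtomParameter); this merely transcribes the code, no particular published parametrisation is implied:

\[
E_i \;=\; \sum_{j \neq i} A_{ij}\, e^{-p_{ij}\left(r_{ij}/r^{0}_{ij} - 1\right)}
\;-\; \sqrt{\sum_{j \neq i} \xi_{ij}^{2}\, e^{-2\, q_{ij}\left(r_{ij}/r^{0}_{ij} - 1\right)}},
\qquad
E \;=\; \sum_{i=1}^{N} E_i .
\]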
Example #26
Clusters * getClusters(Int_t Runs, Int_t dataType, Int_t frameType, Float_t energy) {
   run_energy = energy;

   DataInterface   * di = new DataInterface();
   Int_t             nClusters = kEventsPerRun * 5 * nLayers;
   Int_t             nHits = kEventsPerRun * 50;
   Int_t             nTracks = kEventsPerRun * 2;
   Bool_t            breakSignal = false;
   CalorimeterFrame *cf = new CalorimeterFrame();
   Clusters        * clusters = new Clusters(nClusters);
   Clusters        * trackerClusters = new Clusters(nClusters);
   Clusters        * allClusters = new Clusters(nClusters * Runs);
   Hits            * hits = new Hits(nHits);
   Hits            * eventIDs = new Hits(kEventsPerRun * sizeOfEventID);
   Int_t             eventID = -1;
   Hits            * trackerHits = new Hits(nHits);
   TRandom3        * gRandom = new TRandom3(0);

   for (Int_t i=0; i<Runs; i++) {

      cout << "Finding clusters " << i*kEventsPerRun << "->" << (i+1)*kEventsPerRun << " of " << Runs * kEventsPerRun << endl;

      if (dataType == kMC) {
         eventID = di->getMCFrame(i, cf);
         di->getEventIDs(i, eventIDs);
         cf->diffuseFrame(gRandom);
         hits = cf->findHits(eventID);
         clusters = hits->findClustersFromHits(); // badly optimized
         clusters->removeSmallClusters(2);

         clusters->matchWithEventIDs(eventIDs);
         eventIDs->Clear();
      }
      
      else if (dataType == kData) {
         di->getDataFrame(i, cf, energy);
         hits = cf->findHits();
         clusters = hits->findClustersFromHits();
         clusters->removeSmallClusters(2);
         clusters->removeAllClustersAfterLayer(8); // bad data in layer 10 and 11
      }
      
      clusters->Compress();
      
      if (clusters->GetEntriesFast() == 0) breakSignal = kTRUE; // to stop running

      for (Int_t j=0; j<clusters->GetEntriesFast(); j++) {
         allClusters->appendCluster(clusters->At(j));
      }

      cf->Reset();
      hits->clearHits();
      trackerHits->clearHits();
      clusters->clearClusters();
      trackerClusters->clearClusters();
      
      if (breakSignal) break;
   }


   delete cf;
   delete clusters;
   delete trackerClusters;
   delete hits;
   delete trackerHits;
   delete di;

   return allClusters;
}
Example #27
Tracks * getTracks(Int_t Runs, Int_t dataType, Int_t frameType, Float_t energy, Float_t *x, Float_t *y) {
   run_energy = energy;

   DataInterface   * di = new DataInterface();
   Misalign        * m = new Misalign();
   Int_t             nClusters = kEventsPerRun * 5 * nLayers;
   Int_t             nHits = kEventsPerRun * 50;
   Int_t             nTracks = kEventsPerRun * 2;
   Bool_t            breakSignal = false;
   CalorimeterFrame *cf = new CalorimeterFrame();
   Clusters        * clusters = new Clusters(nClusters);
   Clusters        * trackerClusters = new Clusters(nClusters);
   Hits            * hits = new Hits(nHits);
   Hits            * eventIDs = new Hits(kEventsPerRun * sizeOfEventID);
   Int_t             eventID = -1;
   Hits            * trackerHits = new Hits(nHits);
   Tracks          * calorimeterTracks = nullptr;
   Tracks          * trackerTracks = new Tracks(nTracks);
   Tracks          * allTracks = new Tracks(nTracks * Runs);
   TRandom3        * gRandom = new TRandom3(0);
   TStopwatch        t1, t2, t3, t4, t5, t6;
   ofstream          file("OutputFiles/efficiency.csv", ofstream::out | ofstream::app);
   Int_t             totalNumberOfFrames = 0;
   Int_t             tracksTotalNumberAfterRecon = 0;
   Int_t             tracksRemovedDueToBadChannels = 0;
   Int_t             tracksGivenToReconstruction = 0;
   Int_t             tracksRemovedDueToLeavingDetector = 0;
   Int_t             tracksRemovedDueToNuclearInteractions = 0;
   Int_t             clustersInFirstLayer = 0;
   Int_t             tracksRemovedDueToCollisions = 0;

   // file: np; number of reconstructed tracks; tracks after removeTracksLeavingDetector; tracks after removeTrackCollisions
   
   for (Int_t i=0; i<Runs; i++) {

      cout << "Finding track " << (i+1)*kEventsPerRun << " of " << Runs*kEventsPerRun << "... ";
      
      if (dataType == kMC) {
         t1.Start();

         eventID = di->getMCFrame(i, cf, x, y);
         di->getEventIDs(i, eventIDs);

         t1.Stop(); t2.Start();
         showDebug("Start diffuseFrame\n");

         cf->diffuseFrame(gRandom);

         showDebug("End diffuseFrame, start findHits\n");
         t2.Stop(); t3.Start();

         hits = cf->findHits(eventID);

         showDebug("Number of hits in frame: " << hits->GetEntriesFast() << endl);
         t3.Stop(); t4.Start();

         clusters = hits->findClustersFromHits(); // badly optimized
         
         cout << "Found " << clusters->GetEntriesInLayer(0) << " clusters in the first layer.\n";
         cout << "Found " << clusters->GetEntriesInLayer(1) << " clusters in the second layer.\n";

         clusters->removeSmallClusters(2);
         cout << "Found " << clusters->GetEntriesInLayer(0) << " clusters in the first layer after removeSmallClusters.\n";

         t4.Stop();

         clusters->matchWithEventIDs(eventIDs);
         eventIDs->Clear();
      }
      
      else if (dataType == kData) {
         t1.Start(); di->getDataFrame(i, cf, energy); t1.Stop();
         t3.Start(); hits = cf->findHits(); t3.Stop();
         t4.Start(); clusters = hits->findClustersFromHits(); t4.Stop();
         clusters->removeSmallClusters(2);
         clusters->removeAllClustersAfterLayer(8); // bad data in layer 10 and 11
         cout << "Found " << clusters->GetEntriesInLayer(0) << " clusters in the first layer.\n";
         cout << "Found " << clusters->GetEntriesInLayer(1) << " clusters in the second layer.\n";

         m->correctClusters(clusters);
      }
      
      t5.Start();
      calorimeterTracks = clusters->findCalorimeterTracks();
      t5.Stop();
      
      tracksTotalNumberAfterRecon += calorimeterTracks->GetEntries();

      if (calorimeterTracks->GetEntriesFast() == 0) breakSignal = kTRUE; // to stop running

      // Track improvements
      Int_t nTracksBefore = 0, nTracksAfter = 0;
      Int_t nIsInelastic = 0, nIsNotInelastic = 0;
      
      calorimeterTracks->extrapolateToLayer0();
      calorimeterTracks->splitSharedClusters();
      nTracksBefore = calorimeterTracks->GetEntries();
      calorimeterTracks->removeTracksLeavingDetector();
      nTracksAfter = calorimeterTracks->GetEntries();
      
      tracksRemovedDueToLeavingDetector += nTracksBefore - nTracksAfter;
      
      cout << "Of " << nTracksBefore << " tracks, " << nTracksBefore - nTracksAfter << " (" << 100* ( nTracksBefore - nTracksAfter) / ( (float) nTracksBefore ) << "%) were lost when leaving the detector.\n";
      
      nTracksBefore = calorimeterTracks->GetEntries();
      calorimeterTracks->removeTrackCollisions();
      nTracksAfter = calorimeterTracks->GetEntries();
      
      tracksRemovedDueToCollisions += nTracksBefore - nTracksAfter;

      if (kDataType == kData) {
         nTracksBefore = calorimeterTracks->GetEntries();
         calorimeterTracks->removeTracksEndingInBadChannels();
         nTracksAfter = calorimeterTracks->GetEntries();
         cout << "Of " << nTracksBefore << " tracks, " << nTracksBefore - nTracksAfter << " (" << 100* ( nTracksBefore - nTracksAfter) / ( (float) nTracksBefore ) << "%) were removed due to ending just before a bad channel.\n";
         tracksRemovedDueToBadChannels += nTracksBefore - nTracksAfter;

      }
      
      for (Int_t k=0; k<calorimeterTracks->GetEntriesFast(); k++) {
         if (calorimeterTracks->At(k)) {
            if (calorimeterTracks->At(k)->doesTrackEndAbruptly()) {
               nIsInelastic++;
            }
            else nIsNotInelastic++;
         }
      }
      
      tracksRemovedDueToNuclearInteractions += nIsInelastic;
      cout << "Of these, " << nIsInelastic << " end abruptly and " << nIsNotInelastic << " does not.\n";

      file << energy << " " << kEventsPerRun << " " << nTracksBefore << " " << nTracksAfter << " " << calorimeterTracks->GetEntries() << " " <<  nIsInelastic << " " << nIsNotInelastic << endl;

      // calorimeterTracks->retrogradeTrackImprovement(clusters);

      calorimeterTracks->Compress();
      calorimeterTracks->CompressClusters();
      
      for (Int_t j=0; j<calorimeterTracks->GetEntriesFast(); j++) {
         if (!calorimeterTracks->At(j)) continue;

         allTracks->appendTrack(calorimeterTracks->At(j));
         tracksGivenToReconstruction++;
      }

      allTracks->appendClustersWithoutTrack(clusters->getClustersWithoutTrack());

      cout << Form("Timing: getMCframe (%.2f sec), diffuseFrame (%.2f sec), findHits (%.2f sec), findClustersFromHits (%.2f sec), findTracks (%.2f sec)\n",
              t1.RealTime(), t2.RealTime(), t3.RealTime(), t4.RealTime(), t5.RealTime());

      cf->Reset();
      hits->clearHits();
      trackerHits->clearHits();
      clusters->clearClusters();
      trackerClusters->clearClusters();
      calorimeterTracks->Clear();
      trackerTracks->Clear();

      if (breakSignal) break;
   }
   printf("\033[1mTrack statics for article. Clusters found in first layer (= N protons) = %d. Total number of tracks found = %d. Total number of tracks given to reconstruction = %d. Tracks removed due to bad channels = %d. Tracks removed due to nuclear interactions = %d. Tracks removed due to leaving the detector laterally = %d. Tracks removed due to collisions = %d. Sum = %d.\033[0m\n", clustersInFirstLayer, tracksTotalNumberAfterRecon, tracksGivenToReconstruction, tracksRemovedDueToBadChannels, tracksRemovedDueToNuclearInteractions, tracksRemovedDueToLeavingDetector, tracksRemovedDueToCollisions, tracksGivenToReconstruction + tracksRemovedDueToBadChannels + tracksRemovedDueToLeavingDetector + tracksRemovedDueToCollisions); 

   file.close();

   delete cf;
   delete clusters;
   delete trackerClusters;
   delete hits;
   delete trackerHits;
   delete calorimeterTracks;
   delete trackerTracks;
   delete di;

   return allTracks;
}
Example #28
inline void get_ring_turn_info(TurnInfoMap& turn_info_map, Turns const& turns, Clusters const& clusters)
{
    typedef typename boost::range_value<Turns>::type turn_type;
    typedef typename turn_type::container_type container_type;

    static const operation_type target_operation
            = operation_from_overlay<OverlayType>::value;
    static const operation_type opposite_operation
            = target_operation == operation_union ? operation_intersection : operation_union;

    signed_size_type turn_index = 0;
    for (typename boost::range_iterator<Turns const>::type
            it = boost::begin(turns);
         it != boost::end(turns);
         ++it, turn_index++)
    {
        typename boost::range_value<Turns>::type const& turn = *it;

        bool const colocated_target = target_operation == operation_union
                ? turn.colocated_uu : turn.colocated_ii;
        bool const colocated_opp = target_operation == operation_union
                ? turn.colocated_ii : turn.colocated_uu;
        bool const both_opposite = turn.both(opposite_operation);

        bool const traversed
                = turn.operations[0].visited.finalized()
                || turn.operations[0].visited.rejected()
                || turn.operations[1].visited.finalized()
                || turn.operations[1].visited.rejected()
                || turn.both(operation_blocked)
                || turn.combination(opposite_operation, operation_blocked);

        bool is_closed = false;
        if (turn.cluster_id >= 0 && target_operation == operation_union)
        {
            typename Clusters::const_iterator mit = clusters.find(turn.cluster_id);
            BOOST_ASSERT(mit != clusters.end());

            cluster_info const& cinfo = mit->second;
            is_closed = cinfo.open_count == 0;
        }

        for (typename boost::range_iterator<container_type const>::type
                op_it = boost::begin(turn.operations);
            op_it != boost::end(turn.operations);
            ++op_it)
        {
            ring_identifier const ring_id
                (
                    op_it->seg_id.source_index,
                    op_it->seg_id.multi_index,
                    op_it->seg_id.ring_index
                );

            if (traversed || is_closed || ! op_it->enriched.startable)
            {
                turn_info_map[ring_id].has_traversed_turn = true;
            }
            else if (both_opposite && colocated_target)
            {
                // For union: ii, colocated with a uu
                // For example, two interior rings touch where two exterior rings also touch.
                // The interior rings are not yet traversed, and should be taken from the input

                // For intersection: uu, colocated with an ii
                // unless it is two interior inner rings colocated with a uu

                // So don't set has_traversed_turn here
            }
            else if (both_opposite && ! is_self_turn<OverlayType>(turn))
            {
                // For union, mark any ring with a ii turn as traversed
                // For intersection, any uu - but not if it is a self-turn
                turn_info_map[ring_id].has_traversed_turn = true;
            }
            else if (colocated_opp && ! colocated_target)
            {
                // For union, a turn colocated with ii and NOT with uu/ux
                // For intersection v.v.
                turn_info_map[ring_id].has_traversed_turn = true;
            }
        }
    }
}
Example #29
void RootCluster::calculateClusterPathsToEachNode(size_t nodesCount)
{
    m_cluster_vectors_leading_to_nodes.clear();
    m_cluster_vectors_leading_to_nodes.resize(nodesCount);

    recPathToCluster(this, Clusters());

    for (unsigned i = 0; i < m_cluster_vectors_leading_to_nodes.size(); ++i) 
    {
        size_t paths = m_cluster_vectors_leading_to_nodes[i].size();
        for (size_t j = 1; j < paths; ++j)
        {
            for (size_t k = 0; k < j; ++k)
            {
                // For each pair of paths.

                // Find the lowest common ancestor by finding where the two
                // paths from the root cluster to node i diverge.
                Clusters pathJ = m_cluster_vectors_leading_to_nodes[i][j];
                Clusters pathK = m_cluster_vectors_leading_to_nodes[i][k];
                size_t lcaIndex = 0;
                while ((lcaIndex < pathJ.size()) && 
                       (lcaIndex < pathK.size()) &&
                        (pathJ[lcaIndex] == pathK[lcaIndex]))
                {
                    ++lcaIndex;
                }
                COLA_ASSERT(lcaIndex > 0);

                // lcaIndex will be the clusters/nodes that need to overlap
                // due to these two paths to node i.
                size_t lcaChildJIndex = i;
                size_t lcaChildKIndex = i;
                Cluster *lcaChildJCluster = nullptr;
                Cluster *lcaChildKCluster = nullptr;
                
                // lcaIndex < path{J,K}.size() means the child J or K of 
                // the lca is a Cluster.   At least one of them will always
                // be a cluster.
                COLA_ASSERT((lcaIndex < pathJ.size()) ||
                        (lcaIndex < pathK.size()));
                if (lcaIndex < pathJ.size())
                {
                    lcaChildJCluster = pathJ[lcaIndex];
                    lcaChildJIndex = lcaChildJCluster->clusterVarId;
                }
                if (lcaIndex < pathK.size())
                {
                    lcaChildKCluster = pathK[lcaIndex];
                    lcaChildKIndex = lcaChildKCluster->clusterVarId;
                }

                // We want to exclude the overlapping children of the lca 
                // from having non-overlap constraints generated for them
                // (siblings of a particular cluster usually have 
                // non-overlap constraints generated for them).
                Cluster *lcaCluster = pathJ[lcaIndex - 1];
                lcaCluster->m_cluster_cluster_overlap_exceptions.insert(
                        ShapePair(lcaChildJIndex, lcaChildKIndex));

                if (lcaChildJCluster)
                {
                    // In cluster J, replace node i with cluster K for the 
                    // purpose of non-overlap with siblings, and remember 
                    // this replacement so we can still generate non-overlap 
                    // constraints between multiple nodes that are children
                    // of the same overlapping clusters.
                    lcaChildJCluster->m_overlap_replacement_map[i] =
                            lcaChildKCluster;
                    lcaChildJCluster->m_nodes_replaced_with_clusters.insert(i);
                }

                if (lcaChildKCluster)
                {
                    // In cluster K, replace node i with cluster J for the 
                    // purpose of non-overlap with siblings, and remember 
                    // this replacement so we can still generate non-overlap 
                    // constraints between multiple nodes that are children
                    // of the same overlapping clusters.
                    lcaChildKCluster->m_overlap_replacement_map[i] =
                            lcaChildJCluster;
                    lcaChildKCluster->m_nodes_replaced_with_clusters.insert(i);
                }
            }
        }
    }
}
	void ComputeOriginalSingletons() {
		for(auto it = original_clusters_.clusters_begin(); it != original_clusters_.clusters_end(); it++)
			if(original_clusters_.IsClusterSingleton(it->first))
				metrics_.original_metrics.num_singletons++;
	}